target/linux/generic/backport-4.19/392-v5.1-sch_cake-Make-the-dual-modes-fairer.patch (openwrt/staging/rmilecki.git)
From 712639929912c5eefb09facccb48d55b3f72c9f8 Mon Sep 17 00:00:00 2001
From: George Amanakis <gamanakis@gmail.com>
Date: Fri, 1 Mar 2019 16:04:05 +0100
Subject: [PATCH] sch_cake: Make the dual modes fairer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CAKE host fairness does not work well with TCP flows in dual-srchost and
dual-dsthost setup. The reason is that ACKs generated by TCP flows are
classified as sparse flows, and affect flow isolation from other hosts. Fix
this by calculating host_load based only on the bulk flows a host
generates. In a hash collision the host_bulk_flow_count values must be
decremented on the old hosts and incremented on the new ones *if* the queue
is in the bulk set.

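As a rough illustration of the accounting this patch moves to, here is a
minimal userspace sketch. It is illustrative only, not the kernel code:
flow_deficit, src_bulk, dst_bulk and init_quantum_div are made-up names
standing in for sch_cake's per-host srchost/dsthost bulk-flow counters and
its precomputed quantum_div[] reciprocal table.

	/* Sketch: scale a flow's DRR quantum by the number of *bulk*
	 * flows its host sources/sinks, so sparse flows (e.g. bare ACK
	 * streams) no longer dilute that host's share.
	 */
	#include <stdio.h>

	#define CAKE_QUEUES 1024

	/* quantum_div[i] approximates 2^16 / i, like sch_cake's table */
	static unsigned int quantum_div[CAKE_QUEUES + 1];

	static void init_quantum_div(void)
	{
		quantum_div[0] = ~0U;
		for (int i = 1; i <= CAKE_QUEUES; i++)
			quantum_div[i] = 65535 / i;
	}

	/* host_load is the max of the bulk-flow counts on the flow's
	 * source and destination hosts, never less than 1.
	 */
	static int flow_deficit(int flow_quantum, int src_bulk, int dst_bulk)
	{
		int host_load = 1;

		if (src_bulk > host_load)
			host_load = src_bulk;
		if (dst_bulk > host_load)
			host_load = dst_bulk;

		/* quantum / host_load via the reciprocal table */
		return (flow_quantum * quantum_div[host_load]) >> 16;
	}

	int main(void)
	{
		init_quantum_div();
		/* same host with one bulk flow vs. four bulk flows */
		printf("%d %d\n", flow_deficit(1514, 1, 1),
		       flow_deficit(1514, 4, 1));
		return 0;
	}

With four bulk flows on a host the per-flow quantum drops to roughly a
quarter (378 vs 1513 bytes here), which is what restores dual-mode host
fairness once ACK streams stop counting toward host_load.
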
Reported-by: Pete Heist <peteheist@gmail.com>
Signed-off-by: George Amanakis <gamanakis@gmail.com>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
---
 net/sched/sch_cake.c | 92 ++++++++++++++++++++++++++++++--------------
 1 file changed, 63 insertions(+), 29 deletions(-)

--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -138,8 +138,8 @@ struct cake_flow {
 struct cake_host {
 	u32 srchost_tag;
 	u32 dsthost_tag;
-	u16 srchost_refcnt;
-	u16 dsthost_refcnt;
+	u16 srchost_bulk_flow_count;
+	u16 dsthost_bulk_flow_count;
 };
 
 struct cake_heap_entry {
@@ -746,8 +746,10 @@ skip_hash:
 		 * queue, accept the collision, update the host tags.
 		 */
 		q->way_collisions++;
-		q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
-		q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
+		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+			q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+			q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+		}
 		allocate_src = cake_dsrc(flow_mode);
 		allocate_dst = cake_ddst(flow_mode);
 found:
@@ -767,13 +769,14 @@ found:
 			}
 			for (i = 0; i < CAKE_SET_WAYS;
 			     i++, k = (k + 1) % CAKE_SET_WAYS) {
-				if (!q->hosts[outer_hash + k].srchost_refcnt)
+				if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
 					break;
 			}
 			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
 		srchost_idx = outer_hash + k;
-		q->hosts[srchost_idx].srchost_refcnt++;
+		if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+			q->hosts[srchost_idx].srchost_bulk_flow_count++;
 		q->flows[reduced_hash].srchost = srchost_idx;
 	}
 
@@ -789,13 +792,14 @@ found_src:
 			}
 			for (i = 0; i < CAKE_SET_WAYS;
 			     i++, k = (k + 1) % CAKE_SET_WAYS) {
-				if (!q->hosts[outer_hash + k].dsthost_refcnt)
+				if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
 					break;
 			}
 			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
 		dsthost_idx = outer_hash + k;
-		q->hosts[dsthost_idx].dsthost_refcnt++;
+		if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+			q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
 		q->flows[reduced_hash].dsthost = dsthost_idx;
 	}
 }
@@ -1793,20 +1797,30 @@ static s32 cake_enqueue(struct sk_buff *
 		b->sparse_flow_count++;
 
 		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_refcnt);
+			host_load = max(host_load, srchost->srchost_bulk_flow_count);
 
 		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_refcnt);
+			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
 
 		flow->deficit = (b->flow_quantum *
 				 quantum_div[host_load]) >> 16;
 	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+		struct cake_host *srchost = &b->hosts[flow->srchost];
+		struct cake_host *dsthost = &b->hosts[flow->dsthost];
+
 		/* this flow was empty, accounted as a sparse flow, but actually
 		 * in the bulk rotation.
 		 */
 		flow->set = CAKE_SET_BULK;
 		b->sparse_flow_count--;
 		b->bulk_flow_count++;
+
+		if (cake_dsrc(q->flow_mode))
+			srchost->srchost_bulk_flow_count++;
+
+		if (cake_ddst(q->flow_mode))
+			dsthost->dsthost_bulk_flow_count++;
+
 	}
 
 	if (q->buffer_used > q->buffer_max_used)
@@ -1974,23 +1988,8 @@ retry:
 	dsthost = &b->hosts[flow->dsthost];
 	host_load = 1;
 
-	if (cake_dsrc(q->flow_mode))
-		host_load = max(host_load, srchost->srchost_refcnt);
-
-	if (cake_ddst(q->flow_mode))
-		host_load = max(host_load, dsthost->dsthost_refcnt);
-
-	WARN_ON(host_load > CAKE_QUEUES);
-
 	/* flow isolation (DRR++) */
 	if (flow->deficit <= 0) {
-		/* The shifted prandom_u32() is a way to apply dithering to
-		 * avoid accumulating roundoff errors
-		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  (prandom_u32() >> 16)) >> 16;
-		list_move_tail(&flow->flowchain, &b->old_flows);
-
 		/* Keep all flows with deficits out of the sparse and decaying
 		 * rotations. No non-empty flow can go into the decaying
 		 * rotation, so they can't get deficits
@@ -1999,6 +1998,13 @@ retry:
 		if (flow->head) {
 			b->sparse_flow_count--;
 			b->bulk_flow_count++;
+
+			if (cake_dsrc(q->flow_mode))
+				srchost->srchost_bulk_flow_count++;
+
+			if (cake_ddst(q->flow_mode))
+				dsthost->dsthost_bulk_flow_count++;
+
 			flow->set = CAKE_SET_BULK;
 		} else {
 			/* we've moved it to the bulk rotation for
@@ -2008,6 +2014,22 @@ retry:
 			flow->set = CAKE_SET_SPARSE_WAIT;
 		}
 	}
+
+	if (cake_dsrc(q->flow_mode))
+		host_load = max(host_load, srchost->srchost_bulk_flow_count);
+
+	if (cake_ddst(q->flow_mode))
+		host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+
+	WARN_ON(host_load > CAKE_QUEUES);
+
+	/* The shifted prandom_u32() is a way to apply dithering to
+	 * avoid accumulating roundoff errors
+	 */
+	flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+			  (prandom_u32() >> 16)) >> 16;
+	list_move_tail(&flow->flowchain, &b->old_flows);
+
 	goto retry;
 }
 
@@ -2028,6 +2050,13 @@ retry:
 					       &b->decaying_flows);
 				if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
+
+					if (cake_dsrc(q->flow_mode))
+						srchost->srchost_bulk_flow_count--;
+
+					if (cake_ddst(q->flow_mode))
+						dsthost->dsthost_bulk_flow_count--;
+
 					b->decaying_flow_count++;
 				} else if (flow->set == CAKE_SET_SPARSE ||
 					   flow->set == CAKE_SET_SPARSE_WAIT) {
@@ -2041,14 +2070,19 @@ retry:
 				if (flow->set == CAKE_SET_SPARSE ||
 				    flow->set == CAKE_SET_SPARSE_WAIT)
 					b->sparse_flow_count--;
-				else if (flow->set == CAKE_SET_BULK)
+				else if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
-				else
+
+					if (cake_dsrc(q->flow_mode))
+						srchost->srchost_bulk_flow_count--;
+
+					if (cake_ddst(q->flow_mode))
+						dsthost->dsthost_bulk_flow_count--;
+
+				} else
 					b->decaying_flow_count--;
 
 				flow->set = CAKE_SET_NONE;
-				srchost->srchost_refcnt--;
-				dsthost->dsthost_refcnt--;
 			}
 			goto begin;
 		}