kernel: improve mtk ppe flow accounting
[openwrt/staging/hauke.git] / target/linux/generic/pending-5.15/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 23 Mar 2023 10:24:11 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: improve keeping track of
 offloaded flows

Unify tracking of L2 and L3 flows. Use the generic list field in struct
mtk_flow_entry for tracking L2 subflows. This prepares for improved
flow accounting support.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

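Note on the new tracking scheme: after this patch, every L2 subflow owns
a full copy of its FOE entry in flow_info->data, and the parent L2 flow
reports the minimum idle time across its subflows, adopting the freshest
subflow's hardware timestamp bits. The standalone sketch below (plain C
with a simplified singly linked list in place of the kernel hlist; the
struct and function names are illustrative, not the driver's) shows that
aggregation pattern:

	#include <stdio.h>

	/* Illustrative stand-in for a flow entry; only the timestamp
	 * matters for idle-time aggregation. */
	struct flow {
		unsigned int timestamp;	/* last-seen time, like the ib1 ts bits */
		struct flow *next;	/* stand-in for the hlist linkage */
	};

	/* Idle time of one flow, like __mtk_foe_entry_idle_time(). */
	static unsigned int idle_time(unsigned int now, const struct flow *f)
	{
		return now - f->timestamp;
	}

	/* Refresh the parent from its subflows: keep the smallest idle
	 * time and the matching timestamp, mirroring the loop in
	 * mtk_flow_entry_update_l2(). */
	static unsigned int aggregate_idle(unsigned int now, struct flow *parent,
					   const struct flow *subflows)
	{
		unsigned int idle = idle_time(now, parent);
		const struct flow *cur;

		for (cur = subflows; cur; cur = cur->next) {
			unsigned int cur_idle = idle_time(now, cur);

			if (cur_idle >= idle)
				continue;

			idle = cur_idle;
			parent->timestamp = cur->timestamp;
		}
		return idle;
	}

	int main(void)
	{
		struct flow s2 = { .timestamp = 95, .next = NULL };
		struct flow s1 = { .timestamp = 80, .next = &s2 };
		struct flow parent = { .timestamp = 60, .next = NULL };

		/* now = 100: parent idle 40, subflows idle 20 and 5 -> 5 */
		printf("idle = %u\n", aggregate_idle(100, &parent, &s1));
		return 0;
	}

In the driver itself the same minimum is computed under ppe_lock, with
the timestamp bits extracted via mtk_get_ib1_ts_mask().
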
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -466,26 +466,30 @@ int mtk_foe_entry_set_queue(struct mtk_e
 	return 0;
 }
 
+static int
+mtk_flow_entry_match_len(struct mtk_eth *eth, struct mtk_foe_entry *entry)
+{
+	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+		return offsetof(struct mtk_foe_entry, ipv6._rsv);
+	else
+		return offsetof(struct mtk_foe_entry, ipv4.ib2);
+}
+
 static bool
 mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
-		     struct mtk_foe_entry *data)
+		     struct mtk_foe_entry *data, int len)
 {
-	int type, len;
-
 	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
 		return false;
 
-	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
-	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
-		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
-	else
-		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
-
 	return !memcmp(&entry->data.data, &data->data, len - 4);
 }
 
 static void
-__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+		      bool set_state)
 {
 	struct hlist_head *head;
 	struct hlist_node *tmp;
@@ -495,13 +499,12 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
 				       mtk_flow_l2_ht_params);
 
 		head = &entry->l2_flows;
-		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
-			__mtk_foe_entry_clear(ppe, entry);
+		hlist_for_each_entry_safe(entry, tmp, head, list)
+			__mtk_foe_entry_clear(ppe, entry, set_state);
 		return;
 	}
 
-	hlist_del_init(&entry->list);
-	if (entry->hash != 0xffff) {
+	if (entry->hash != 0xffff && set_state) {
 		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
 
 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
@@ -520,7 +523,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
 	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
 		return;
 
-	hlist_del_init(&entry->l2_data.list);
+	hlist_del_init(&entry->list);
 	kfree(entry);
 }
 
@@ -536,66 +539,55 @@ static int __mtk_foe_entry_idle_time(str
 	return now - timestamp;
 }
 
+static bool
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	struct mtk_foe_entry foe = {};
+	struct mtk_foe_entry *hwe;
+	u16 hash = entry->hash;
+	int len;
+
+	if (hash == 0xffff)
+		return false;
+
+	hwe = mtk_foe_get_entry(ppe, hash);
+	len = mtk_flow_entry_match_len(ppe->eth, &entry->data);
+	memcpy(&foe, hwe, len);
+
+	if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
+	    FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
+		return false;
+
+	entry->data.ib1 = foe.ib1;
+
+	return true;
+}
+
 static void
 mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
 	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
 	struct mtk_flow_entry *cur;
-	struct mtk_foe_entry *hwe;
 	struct hlist_node *tmp;
 	int idle;
 
 	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
-	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, list) {
 		int cur_idle;
-		u32 ib1;
-
-		hwe = mtk_foe_get_entry(ppe, cur->hash);
-		ib1 = READ_ONCE(hwe->ib1);
 
-		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
-			cur->hash = 0xffff;
-			__mtk_foe_entry_clear(ppe, cur);
+		if (!mtk_flow_entry_update(ppe, cur)) {
+			__mtk_foe_entry_clear(ppe, entry, false);
 			continue;
 		}
 
-		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+		cur_idle = __mtk_foe_entry_idle_time(ppe, cur->data.ib1);
 		if (cur_idle >= idle)
 			continue;
 
 		idle = cur_idle;
 		entry->data.ib1 &= ~ib1_ts_mask;
-		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
-	}
-}
-
-static void
-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
-{
-	struct mtk_foe_entry foe = {};
-	struct mtk_foe_entry *hwe;
-
-	spin_lock_bh(&ppe_lock);
-
-	if (entry->type == MTK_FLOW_TYPE_L2) {
-		mtk_flow_entry_update_l2(ppe, entry);
-		goto out;
+		entry->data.ib1 |= cur->data.ib1 & ib1_ts_mask;
 	}
-
-	if (entry->hash == 0xffff)
-		goto out;
-
-	hwe = mtk_foe_get_entry(ppe, entry->hash);
-	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
-	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
-		entry->hash = 0xffff;
-		goto out;
-	}
-
-	entry->data.ib1 = foe.ib1;
-
-out:
-	spin_unlock_bh(&ppe_lock);
 }
 
 static void
@@ -632,7 +624,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
 	spin_lock_bh(&ppe_lock);
-	__mtk_foe_entry_clear(ppe, entry);
+	__mtk_foe_entry_clear(ppe, entry, true);
+	hlist_del_init(&entry->list);
 	spin_unlock_bh(&ppe_lock);
 }
 
@@ -679,8 +672,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
 {
 	const struct mtk_soc_data *soc = ppe->eth->soc;
 	struct mtk_flow_entry *flow_info;
-	struct mtk_foe_entry foe = {}, *hwe;
 	struct mtk_foe_mac_info *l2;
+	struct mtk_foe_entry *hwe;
 	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
 	int type;
 
@@ -688,30 +681,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
 	if (!flow_info)
 		return;
 
-	flow_info->l2_data.base_flow = entry;
 	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
 	flow_info->hash = hash;
 	hlist_add_head(&flow_info->list,
 		       &ppe->foe_flow[hash / soc->hash_offset]);
-	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+	hlist_add_head(&flow_info->list, &entry->l2_flows);
 
 	hwe = mtk_foe_get_entry(ppe, hash);
-	memcpy(&foe, hwe, soc->foe_entry_size);
-	foe.ib1 &= ib1_mask;
-	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+	memcpy(&flow_info->data, hwe, soc->foe_entry_size);
+	flow_info->data.ib1 &= ib1_mask;
+	flow_info->data.ib1 |= entry->data.ib1 & ~ib1_mask;
 
-	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
+	l2 = mtk_foe_entry_l2(ppe->eth, &flow_info->data);
 	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
 
-	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
+	type = mtk_get_ib1_pkt_type(ppe->eth, flow_info->data.ib1);
 	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
-		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+		memcpy(&flow_info->data.ipv4.new, &flow_info->data.ipv4.orig,
+		       sizeof(flow_info->data.ipv4.new));
 	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
 		l2->etype = ETH_P_IPV6;
 
-	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
+	*mtk_foe_entry_ib2(ppe->eth, &flow_info->data) = entry->data.bridge.ib2;
 
-	__mtk_foe_entry_commit(ppe, &foe, hash);
+	__mtk_foe_entry_commit(ppe, &flow_info->data, hash);
 }
 
 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
@@ -721,9 +714,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
 	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
 	struct mtk_flow_entry *entry;
 	struct mtk_foe_bridge key = {};
+	struct mtk_foe_entry foe = {};
 	struct hlist_node *n;
 	struct ethhdr *eh;
 	bool found = false;
+	int entry_len;
 	u8 *tag;
 
 	spin_lock_bh(&ppe_lock);
@@ -731,20 +726,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
 	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
 		goto out;
 
-	hlist_for_each_entry_safe(entry, n, head, list) {
-		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
-			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
-				     MTK_FOE_STATE_BIND))
-				continue;
-
-			entry->hash = 0xffff;
-			__mtk_foe_entry_clear(ppe, entry);
-			continue;
-		}
+	entry_len = mtk_flow_entry_match_len(ppe->eth, hwe);
+	memcpy(&foe, hwe, entry_len);
 
-		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
+	hlist_for_each_entry_safe(entry, n, head, list) {
+		if (found ||
+		    !mtk_flow_entry_match(ppe->eth, entry, &foe, entry_len)) {
 			if (entry->hash != 0xffff)
-				entry->hash = 0xffff;
+				__mtk_foe_entry_clear(ppe, entry, false);
 			continue;
 		}
 
@@ -795,9 +784,17 @@ out:
 
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
-	mtk_flow_entry_update(ppe, entry);
+	int idle;
+
+	spin_lock_bh(&ppe_lock);
+	if (entry->type == MTK_FLOW_TYPE_L2)
+		mtk_flow_entry_update_l2(ppe, entry);
+	else
+		mtk_flow_entry_update(ppe, entry);
+	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+	spin_unlock_bh(&ppe_lock);
 
-	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+	return idle;
 }
 
 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -275,13 +275,7 @@ struct mtk_flow_entry {
 	s8 wed_index;
 	u8 ppe_index;
 	u16 hash;
-	union {
-		struct mtk_foe_entry data;
-		struct {
-			struct mtk_flow_entry *base_flow;
-			struct hlist_node list;
-		} l2_data;
-	};
+	struct mtk_foe_entry data;
 	struct rhash_head node;
 	unsigned long cookie;
 };