From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 23 Mar 2023 10:24:11 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: improve keeping track of
 offloaded flows

Unify tracking of L2 and L3 flows. Use the generic list field in struct
mtk_foe_entry for tracking L2 subflows. Preparation for improving
flow accounting support.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 drivers/net/ethernet/mediatek/mtk_ppe.c | 162 ++++++++++++------------
 drivers/net/ethernet/mediatek/mtk_ppe.h |  15 +--
 2 files changed, 86 insertions(+), 91 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -476,42 +476,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
 	return 0;
 }
 
+static int
+mtk_flow_entry_match_len(struct mtk_eth *eth, struct mtk_foe_entry *entry)
+{
+	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+		return offsetof(struct mtk_foe_entry, ipv6._rsv);
+	else
+		return offsetof(struct mtk_foe_entry, ipv4.ib2);
+}
+
 static bool
 mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
-		     struct mtk_foe_entry *data)
+		     struct mtk_foe_entry *data, int len)
 {
-	int type, len;
-
 	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
 		return false;
 
-	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
-	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
-		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
-	else
-		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
-
 	return !memcmp(&entry->data.data, &data->data, len - 4);
 }
 
 static void
-__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+		      bool set_state)
 {
-	struct hlist_head *head;
 	struct hlist_node *tmp;
 
 	if (entry->type == MTK_FLOW_TYPE_L2) {
 		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);
 
-		head = &entry->l2_flows;
-		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
-			__mtk_foe_entry_clear(ppe, entry);
+		hlist_for_each_entry_safe(entry, tmp, &entry->l2_flows, l2_list)
+			__mtk_foe_entry_clear(ppe, entry, set_state);
 		return;
 	}
 
-	hlist_del_init(&entry->list);
-	if (entry->hash != 0xffff) {
+	if (entry->hash != 0xffff && set_state) {
 		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
 
 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
@@ -531,7 +532,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
 	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
 		return;
 
-	hlist_del_init(&entry->l2_data.list);
+	hlist_del_init(&entry->l2_list);
+	hlist_del_init(&entry->list);
 	kfree(entry);
 }
 
@@ -547,66 +549,55 @@ static int __mtk_foe_entry_idle_time(str
 	return now - timestamp;
 }
 
+static bool
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	struct mtk_foe_entry foe = {};
+	struct mtk_foe_entry *hwe;
+	u16 hash = entry->hash;
+	int len;
+
+	if (hash == 0xffff)
+		return false;
+
+	hwe = mtk_foe_get_entry(ppe, hash);
+	len = mtk_flow_entry_match_len(ppe->eth, &entry->data);
+	memcpy(&foe, hwe, len);
+
+	if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
+	    FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
+		return false;
+
+	entry->data.ib1 = foe.ib1;
+
+	return true;
+}
+
 static void
 mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
 	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
 	struct mtk_flow_entry *cur;
-	struct mtk_foe_entry *hwe;
 	struct hlist_node *tmp;
 	int idle;
 
 	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
-	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
 		int cur_idle;
-		u32 ib1;
-
-		hwe = mtk_foe_get_entry(ppe, cur->hash);
-		ib1 = READ_ONCE(hwe->ib1);
 
-		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
-			cur->hash = 0xffff;
-			__mtk_foe_entry_clear(ppe, cur);
+		if (!mtk_flow_entry_update(ppe, cur)) {
+			__mtk_foe_entry_clear(ppe, entry, false);
 			continue;
 		}
 
-		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+		cur_idle = __mtk_foe_entry_idle_time(ppe, cur->data.ib1);
 		if (cur_idle >= idle)
 			continue;
 
 		idle = cur_idle;
 		entry->data.ib1 &= ~ib1_ts_mask;
-		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
-	}
-}
-
-static void
-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
-{
-	struct mtk_foe_entry foe = {};
-	struct mtk_foe_entry *hwe;
-
-	spin_lock_bh(&ppe_lock);
-
-	if (entry->type == MTK_FLOW_TYPE_L2) {
-		mtk_flow_entry_update_l2(ppe, entry);
-		goto out;
+		entry->data.ib1 |= cur->data.ib1 & ib1_ts_mask;
 	}
-
-	if (entry->hash == 0xffff)
-		goto out;
-
-	hwe = mtk_foe_get_entry(ppe, entry->hash);
-	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
-	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
-		entry->hash = 0xffff;
-		goto out;
-	}
-
-	entry->data.ib1 = foe.ib1;
-
-out:
-	spin_unlock_bh(&ppe_lock);
 }
 
 static void
@@ -649,7 +640,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
 	spin_lock_bh(&ppe_lock);
-	__mtk_foe_entry_clear(ppe, entry);
+	__mtk_foe_entry_clear(ppe, entry, true);
+	hlist_del_init(&entry->list);
 	spin_unlock_bh(&ppe_lock);
 }
 
@@ -696,8 +688,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
 {
 	const struct mtk_soc_data *soc = ppe->eth->soc;
 	struct mtk_flow_entry *flow_info;
-	struct mtk_foe_entry foe = {}, *hwe;
 	struct mtk_foe_mac_info *l2;
+	struct mtk_foe_entry *hwe;
 	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
 	int type;
 
@@ -705,30 +697,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
 	if (!flow_info)
 		return;
 
-	flow_info->l2_data.base_flow = entry;
 	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
 	flow_info->hash = hash;
 	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
-	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+	hlist_add_head(&flow_info->l2_list, &entry->l2_flows);
 
 	hwe = mtk_foe_get_entry(ppe, hash);
-	memcpy(&foe, hwe, soc->foe_entry_size);
-	foe.ib1 &= ib1_mask;
-	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+	memcpy(&flow_info->data, hwe, soc->foe_entry_size);
+	flow_info->data.ib1 &= ib1_mask;
+	flow_info->data.ib1 |= entry->data.ib1 & ~ib1_mask;
 
-	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
+	l2 = mtk_foe_entry_l2(ppe->eth, &flow_info->data);
 	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
 
-	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
+	type = mtk_get_ib1_pkt_type(ppe->eth, flow_info->data.ib1);
 	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
-		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+		memcpy(&flow_info->data.ipv4.new, &flow_info->data.ipv4.orig,
+		       sizeof(flow_info->data.ipv4.new));
 	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
 		l2->etype = ETH_P_IPV6;
 
-	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
+	*mtk_foe_entry_ib2(ppe->eth, &flow_info->data) = entry->data.bridge.ib2;
 
-	__mtk_foe_entry_commit(ppe, &foe, hash);
+	__mtk_foe_entry_commit(ppe, &flow_info->data, hash);
 }
 
 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
@@ -738,9 +730,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
 	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
 	struct mtk_flow_entry *entry;
 	struct mtk_foe_bridge key = {};
+	struct mtk_foe_entry foe = {};
 	struct hlist_node *n;
 	struct ethhdr *eh;
 	bool found = false;
+	int entry_len;
 	u8 *tag;
 
 	spin_lock_bh(&ppe_lock);
@@ -748,20 +742,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
 	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
 		goto out;
 
-	hlist_for_each_entry_safe(entry, n, head, list) {
-		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
-			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
-				continue;
-
-			entry->hash = 0xffff;
-			__mtk_foe_entry_clear(ppe, entry);
-			continue;
-		}
+	entry_len = mtk_flow_entry_match_len(ppe->eth, hwe);
+	memcpy(&foe, hwe, entry_len);
 
-		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
+	hlist_for_each_entry_safe(entry, n, head, list) {
+		if (found ||
+		    !mtk_flow_entry_match(ppe->eth, entry, &foe, entry_len)) {
 			if (entry->hash != 0xffff)
-				entry->hash = 0xffff;
+				__mtk_foe_entry_clear(ppe, entry, false);
 			continue;
 		}
 
@@ -810,9 +798,17 @@ out:
 
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
-	mtk_flow_entry_update(ppe, entry);
+	int idle;
+
+	spin_lock_bh(&ppe_lock);
+	if (entry->type == MTK_FLOW_TYPE_L2)
+		mtk_flow_entry_update_l2(ppe, entry);
+	else
+		mtk_flow_entry_update(ppe, entry);
+	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+	spin_unlock_bh(&ppe_lock);
 
-	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+	return idle;
 }
 
 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -286,7 +286,12 @@ enum {
 
 struct mtk_flow_entry {
 	union {
-		struct hlist_node list;
+		/* regular flows + L2 subflows */
+		struct {
+			struct hlist_node list;
+			struct hlist_node l2_list;
+		};
+		/* L2 flows */
 		struct {
 			struct rhash_head l2_node;
 			struct hlist_head l2_flows;
@@ -296,13 +301,7 @@ struct mtk_flow_entry {
 	s8 wed_index;
 	u8 ppe_index;
 	u16 hash;
-	union {
-		struct mtk_foe_entry data;
-		struct {
-			struct mtk_flow_entry *base_flow;
-			struct hlist_node list;
-		} l2_data;
-	};
+	struct mtk_foe_entry data;
 	struct rhash_head node;
 	unsigned long cookie;
 };