openwrt/openwrt.git: target/linux/generic/hack-5.15/600-bridge_offload.patch
From 11c3fae5afa6cac444d12622e2cf5af60a99c1ef Mon Sep 17 00:00:00 2001
From: OpenWrt community <openwrt-devel@lists.openwrt.org>
Date: Wed, 13 Jul 2022 13:43:15 +0200
Subject: [PATCH] net/bridge: add bridge offload

---
 include/linux/if_bridge.h       |   1 +
 net/bridge/Makefile             |   2 +-
 net/bridge/br.c                 |   8 +
 net/bridge/br_device.c          |   2 +
 net/bridge/br_fdb.c             |   5 +
 net/bridge/br_forward.c         |   3 +
 net/bridge/br_if.c              |   6 +-
 net/bridge/br_input.c           |   5 +
 net/bridge/br_offload.c         | 438 ++++++++++++++++++++++++++++++++
 net/bridge/br_private.h         |  22 +-
 net/bridge/br_private_offload.h |  23 ++
 net/bridge/br_stp.c             |   3 +
 net/bridge/br_sysfs_br.c        |  35 +++
 net/bridge/br_sysfs_if.c        |   2 +
 net/bridge/br_vlan_tunnel.c     |   3 +
 15 files changed, 555 insertions(+), 3 deletions(-)
 create mode 100644 net/bridge/br_offload.c
 create mode 100644 net/bridge/br_private_offload.h

--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -59,6 +59,7 @@ struct br_ip_list {
 #define BR_MRP_LOST_IN_CONT BIT(19)
 #define BR_TX_FWD_OFFLOAD BIT(20)
 #define BR_BPDU_FILTER BIT(21)
+#define BR_OFFLOAD BIT(22)
 
 #define BR_DEFAULT_AGEING_TIME (300 * HZ)
 
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_BRIDGE) += bridge.o
 
-bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
+bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o br_offload.o \
 	br_ioctl.o br_stp.o br_stp_bpdu.o \
 	br_stp_if.o br_stp_timer.o br_netlink.o \
 	br_netlink_tunnel.o br_arp_nd_proxy.o
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -18,6 +18,7 @@
 #include <net/switchdev.h>
 
 #include "br_private.h"
+#include "br_private_offload.h"
 
 /*
  * Handle changes in state of network devices enslaved to a bridge.
@@ -381,6 +382,10 @@ static int __init br_init(void)
 	if (err)
 		goto err_out;
 
+	err = br_offload_init();
+	if (err)
+		goto err_out0;
+
 	err = register_pernet_subsys(&br_net_ops);
 	if (err)
 		goto err_out1;
@@ -430,6 +435,8 @@ err_out3:
 err_out2:
 	unregister_pernet_subsys(&br_net_ops);
 err_out1:
+	br_offload_fini();
+err_out0:
 	br_fdb_fini();
 err_out:
 	stp_proto_unregister(&br_stp_proto);
@@ -452,6 +459,7 @@ static void __exit br_deinit(void)
 #if IS_ENABLED(CONFIG_ATM_LANE)
 	br_fdb_test_addr_hook = NULL;
 #endif
+	br_offload_fini();
 	br_fdb_fini();
 }
 
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -524,6 +524,8 @@ void br_dev_setup(struct net_device *dev
 	br->bridge_hello_time = br->hello_time = 2 * HZ;
 	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
 	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
+	br->offload_cache_size = 128;
+	br->offload_cache_reserved = 8;
 	dev->max_mtu = ETH_MAX_MTU;
 
 	br_netfilter_rtable_init(br);
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,6 +23,7 @@
 #include <net/switchdev.h>
 #include <trace/events/bridge.h>
 #include "br_private.h"
+#include "br_private_offload.h"
 
 static const struct rhashtable_params br_fdb_rht_params = {
 	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
@@ -518,6 +519,8 @@ static struct net_bridge_fdb_entry *fdb_
 	fdb->key.vlan_id = vid;
 	fdb->flags = flags;
 	fdb->updated = fdb->used = jiffies;
+	INIT_HLIST_HEAD(&fdb->offload_in);
+	INIT_HLIST_HEAD(&fdb->offload_out);
 	if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
 					  &fdb->rhnode,
 					  br_fdb_rht_params)) {
@@ -794,6 +797,8 @@ static void fdb_notify(struct net_bridge
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
+	br_offload_fdb_update(fdb);
+
 	if (swdev_notify)
 		br_switchdev_fdb_notify(br, fdb, type);
 
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -16,6 +16,7 @@
 #include <linux/if_vlan.h>
 #include <linux/netfilter_bridge.h>
 #include "br_private.h"
+#include "br_private_offload.h"
 
 /* Don't forward packets to originating port or forwarding disabled */
 static inline int should_deliver(const struct net_bridge_port *p,
@@ -32,6 +33,8 @@ static inline int should_deliver(const s
 
 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	br_offload_output(skb);
+
 	skb_push(skb, ETH_HLEN);
 	if (!is_skb_forwardable(skb->dev, skb))
 		goto drop;
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -25,6 +25,7 @@
 #include <net/net_namespace.h>
 
 #include "br_private.h"
+#include "br_private_offload.h"
 
 /*
  * Determine initial path cost based on speed.
@@ -428,7 +429,7 @@ static struct net_bridge_port *new_nbp(s
 	p->path_cost = port_cost(dev);
 	p->priority = 0x8000 >> BR_PORT_BITS;
 	p->port_no = index;
-	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_OFFLOAD;
 	br_init_port(p);
 	br_set_state(p, BR_STATE_DISABLED);
 	br_stp_port_timer_init(p);
@@ -771,6 +772,9 @@ void br_port_flags_change(struct net_bri
 
 	if (mask & BR_NEIGH_SUPPRESS)
 		br_recalculate_neigh_suppress_enabled(br);
+
+	if (mask & BR_OFFLOAD)
+		br_offload_port_state(p);
 }
 
 bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -22,6 +22,7 @@
 #include <linux/rculist.h>
 #include "br_private.h"
 #include "br_private_tunnel.h"
+#include "br_private_offload.h"
 
 static int
 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -171,6 +172,7 @@ int br_handle_frame_finish(struct net *n
 		dst->used = now;
 		br_forward(dst->dst, skb, local_rcv, false);
 	} else {
+		br_offload_skb_disable(skb);
 		if (!mcast_hit)
 			br_flood(br, skb, pkt_type, local_rcv, false);
 		else
@@ -304,6 +306,9 @@ static rx_handler_result_t br_handle_fra
 	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
 
 	p = br_port_get_rcu(skb->dev);
+	if (br_offload_input(p, skb))
+		return RX_HANDLER_CONSUMED;
+
 	if (p->flags & BR_VLAN_TUNNEL)
 		br_handle_ingress_vlan_tunnel(skb, p, nbp_vlan_group_rcu(p));
 
--- /dev/null
+++ b/net/bridge/br_offload.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include "br_private.h"
+#include "br_private_offload.h"
+
+static DEFINE_SPINLOCK(offload_lock);
+
+struct bridge_flow_key {
+	u8 dest[ETH_ALEN];
+	u8 src[ETH_ALEN];
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	u16 vlan_tag;
+	bool vlan_present;
+#endif
+};
+
+struct bridge_flow {
+	struct net_bridge_port *port;
+	struct rhash_head node;
+	struct bridge_flow_key key;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	bool vlan_out_present;
+	u16 vlan_out;
+#endif
+
+	unsigned long used;
+	struct net_bridge_fdb_entry *fdb_in, *fdb_out;
+	struct hlist_node fdb_list_in, fdb_list_out;
+
+	struct rcu_head rcu;
+};
+
+static const struct rhashtable_params flow_params = {
+	.automatic_shrinking = true,
+	.head_offset = offsetof(struct bridge_flow, node),
+	.key_len = sizeof(struct bridge_flow_key),
+	.key_offset = offsetof(struct bridge_flow, key),
+};
+
+static struct kmem_cache *offload_cache __read_mostly;
+
+static void
+flow_rcu_free(struct rcu_head *head)
+{
+	struct bridge_flow *flow;
+
+	flow = container_of(head, struct bridge_flow, rcu);
+	kmem_cache_free(offload_cache, flow);
+}
+
+static void
+__br_offload_flow_free(struct bridge_flow *flow)
+{
+	flow->used = 0;
+	hlist_del(&flow->fdb_list_in);
+	hlist_del(&flow->fdb_list_out);
+
+	call_rcu(&flow->rcu, flow_rcu_free);
+}
+
+static void
+br_offload_flow_free(struct bridge_flow *flow)
+{
+	if (rhashtable_remove_fast(&flow->port->offload.rht, &flow->node,
+				   flow_params) != 0)
+		return;
+
+	__br_offload_flow_free(flow);
+}
+
+static bool
+br_offload_flow_fdb_refresh_time(struct bridge_flow *flow,
+				 struct net_bridge_fdb_entry *fdb)
+{
+	if (!time_after(flow->used, fdb->updated))
+		return false;
+
+	fdb->updated = flow->used;
+
+	return true;
+}
+
+
+static void
+br_offload_flow_refresh_time(struct bridge_flow *flow)
+{
+	br_offload_flow_fdb_refresh_time(flow, flow->fdb_in);
+	br_offload_flow_fdb_refresh_time(flow, flow->fdb_out);
+}
+
+static void
+br_offload_destroy_cb(void *ptr, void *arg)
+{
+	struct bridge_flow *flow = ptr;
+
+	__br_offload_flow_free(flow);
+}
+
+static bool
+br_offload_need_gc(struct net_bridge_port *p)
+{
+	return (atomic_read(&p->offload.rht.nelems) +
+		p->br->offload_cache_reserved) >= p->br->offload_cache_size;
+}
+
+static void
+br_offload_gc_work(struct work_struct *work)
+{
+	struct rhashtable_iter hti;
+	struct net_bridge_port *p;
+	struct bridge_flow *gc_flow = NULL;
+	struct bridge_flow *flow;
+	unsigned long gc_used;
+
+	p = container_of(work, struct net_bridge_port, offload.gc_work);
+
+	if (!br_offload_need_gc(p))
+		return;
+
+	rhashtable_walk_enter(&p->offload.rht, &hti);
+	rhashtable_walk_start(&hti);
+	while ((flow = rhashtable_walk_next(&hti)) != NULL) {
+		unsigned long used;
+
+		if (IS_ERR(flow))
+			continue;
+
+		used = READ_ONCE(flow->used);
+		if (!used)
+			continue;
+
+		if (gc_flow && !time_before(used, gc_used))
+			continue;
+
+		gc_flow = flow;
+		gc_used = used;
+	}
+	rhashtable_walk_stop(&hti);
+	rhashtable_walk_exit(&hti);
+
+	if (!gc_flow)
+		return;
+
+	spin_lock_bh(&offload_lock);
+	if (br_offload_need_gc(p) && gc_flow &&
+	    gc_flow->used == gc_used)
+		br_offload_flow_free(gc_flow);
+	if (p->offload.enabled && br_offload_need_gc(p))
+		queue_work(system_long_wq, work);
+	spin_unlock_bh(&offload_lock);
+
+}
+
+void br_offload_port_state(struct net_bridge_port *p)
+{
+	struct net_bridge_port_offload *o = &p->offload;
+	bool enabled = true;
+	bool flush = false;
+
+	if (p->state != BR_STATE_FORWARDING ||
+	    !(p->flags & BR_OFFLOAD))
+		enabled = false;
+
+	spin_lock_bh(&offload_lock);
+	if (o->enabled == enabled)
+		goto out;
+
+	if (enabled) {
+		if (!o->gc_work.func)
+			INIT_WORK(&o->gc_work, br_offload_gc_work);
+		rhashtable_init(&o->rht, &flow_params);
+	} else {
+		flush = true;
+		rhashtable_free_and_destroy(&o->rht, br_offload_destroy_cb, o);
+	}
+
+	o->enabled = enabled;
+
+out:
+	spin_unlock_bh(&offload_lock);
+
+	if (flush)
+		flush_work(&o->gc_work);
+}
+
+void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb)
+{
+	struct bridge_flow *f;
+	struct hlist_node *tmp;
+
+	spin_lock_bh(&offload_lock);
+
+	hlist_for_each_entry_safe(f, tmp, &fdb->offload_in, fdb_list_in)
+		br_offload_flow_free(f);
+
+	hlist_for_each_entry_safe(f, tmp, &fdb->offload_out, fdb_list_out)
+		br_offload_flow_free(f);
+
+	spin_unlock_bh(&offload_lock);
+}
+
+static void
+br_offload_prepare_key(struct net_bridge_port *p, struct bridge_flow_key *key,
+		       struct sk_buff *skb)
+{
+	memset(key, 0, sizeof(*key));
+	memcpy(key, eth_hdr(skb), 2 * ETH_ALEN);
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (!br_opt_get(p->br, BROPT_VLAN_ENABLED))
+		return;
+
+	if (!skb_vlan_tag_present(skb) || skb->vlan_proto != p->br->vlan_proto)
+		return;
+
+	key->vlan_present = true;
+	key->vlan_tag = skb_vlan_tag_get_id(skb);
+#endif
+}
+
+void br_offload_output(struct sk_buff *skb)
+{
+	struct net_bridge_port_offload *o;
+	struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
+	struct net_bridge_port *p, *inp;
+	struct net_device *dev;
+	struct net_bridge_fdb_entry *fdb_in, *fdb_out;
+	struct net_bridge_vlan_group *vg;
+	struct bridge_flow_key key;
+	struct bridge_flow *flow;
+	u16 vlan;
+
+	if (!cb->offload)
+		return;
+
+	rcu_read_lock();
+
+	p = br_port_get_rcu(skb->dev);
+	if (!p)
+		goto out;
+
+	o = &p->offload;
+	if (!o->enabled)
+		goto out;
+
+	if (atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size)
+		goto out;
+
+	dev = dev_get_by_index_rcu(dev_net(p->br->dev), cb->input_ifindex);
+	if (!dev)
+		goto out;
+
+	inp = br_port_get_rcu(dev);
+	if (!inp)
+		goto out;
+
+	vg = nbp_vlan_group_rcu(inp);
+	vlan = cb->input_vlan_present ? cb->input_vlan_tag : br_get_pvid(vg);
+	fdb_in = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_source, vlan);
+	if (!fdb_in || !fdb_in->dst)
+		goto out;
+
+	vg = nbp_vlan_group_rcu(p);
+	vlan = skb_vlan_tag_present(skb) ? skb_vlan_tag_get_id(skb) : br_get_pvid(vg);
+	fdb_out = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_dest, vlan);
+	if (!fdb_out || !fdb_out->dst)
+		goto out;
+
+	br_offload_prepare_key(p, &key, skb);
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	key.vlan_present = cb->input_vlan_present;
+	key.vlan_tag = cb->input_vlan_tag;
+#endif
+
+	flow = kmem_cache_alloc(offload_cache, GFP_ATOMIC);
+	flow->port = inp;
+	memcpy(&flow->key, &key, sizeof(key));
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	flow->vlan_out_present = skb_vlan_tag_present(skb);
+	flow->vlan_out = skb_vlan_tag_get(skb);
+#endif
+
+	flow->fdb_in = fdb_in;
+	flow->fdb_out = fdb_out;
+	flow->used = jiffies;
+
+	spin_lock_bh(&offload_lock);
+	if (!o->enabled ||
+	    atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size ||
+	    rhashtable_insert_fast(&inp->offload.rht, &flow->node, flow_params)) {
+		kmem_cache_free(offload_cache, flow);
+		goto out_unlock;
+	}
+
+	hlist_add_head(&flow->fdb_list_in, &fdb_in->offload_in);
+	hlist_add_head(&flow->fdb_list_out, &fdb_out->offload_out);
+
+	if (br_offload_need_gc(p))
+		queue_work(system_long_wq, &p->offload.gc_work);
+
+out_unlock:
+	spin_unlock_bh(&offload_lock);
+
+out:
+	rcu_read_unlock();
+}
+
+bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb)
+{
+	struct net_bridge_port_offload *o = &p->offload;
+	struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
+	struct bridge_flow_key key;
+	struct net_bridge_port *dst;
+	struct bridge_flow *flow;
+	unsigned long now = jiffies;
+	bool ret = false;
+
+	if (skb->len < sizeof(key))
+		return false;
+
+	if (!o->enabled)
+		return false;
+
+	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+		return false;
+
+	br_offload_prepare_key(p, &key, skb);
+
+	rcu_read_lock();
+	flow = rhashtable_lookup(&o->rht, &key, flow_params);
+	if (!flow) {
+		cb->offload = 1;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+		cb->input_vlan_present = key.vlan_present != 0;
+		cb->input_vlan_tag = key.vlan_tag;
+#endif
+		cb->input_ifindex = p->dev->ifindex;
+		goto out;
+	}
+
+	if (flow->fdb_in->dst != p)
+		goto out;
+
+	dst = flow->fdb_out->dst;
+	if (!dst)
+		goto out;
+
+	ret = true;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (!flow->vlan_out_present && key.vlan_present) {
+		__vlan_hwaccel_clear_tag(skb);
+	} else if (flow->vlan_out_present) {
+		if (skb_vlan_tag_present(skb) &&
+		    skb->vlan_proto != p->br->vlan_proto) {
+			/* Protocol-mismatch, empty out vlan_tci for new tag */
+			skb_push(skb, ETH_HLEN);
+			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+							skb_vlan_tag_get(skb));
+			if (unlikely(!skb))
+				goto out;
+
+			skb_pull(skb, ETH_HLEN);
+			skb_reset_mac_len(skb);
+		}
+
+		__vlan_hwaccel_put_tag(skb, p->br->vlan_proto,
+				       flow->vlan_out);
+	}
+#endif
+
+	skb->dev = dst->dev;
+	skb_push(skb, ETH_HLEN);
+
+	if (skb_warn_if_lro(skb) || !is_skb_forwardable(skb->dev, skb)) {
+		kfree_skb(skb);
+		goto out;
+	}
+
+	if (now - flow->used >= HZ) {
+		flow->used = now;
+		br_offload_flow_refresh_time(flow);
+	}
+
+	skb_forward_csum(skb);
+	dev_queue_xmit(skb);
+
+out:
+	rcu_read_unlock();
+	return ret;
+}
+
+static void
+br_offload_check_gc(struct net_bridge *br)
+{
+	struct net_bridge_port *p;
+
+	spin_lock_bh(&br->lock);
+	list_for_each_entry(p, &br->port_list, list)
+		if (br_offload_need_gc(p))
+			queue_work(system_long_wq, &p->offload.gc_work);
+	spin_unlock_bh(&br->lock);
+}
+
+
+int br_offload_set_cache_size(struct net_bridge *br, unsigned long val,
+			      struct netlink_ext_ack *extack)
+{
+	br->offload_cache_size = val;
+	br_offload_check_gc(br);
+
+	return 0;
+}
+
+int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val,
+				  struct netlink_ext_ack *extack)
+{
+	br->offload_cache_reserved = val;
+	br_offload_check_gc(br);
+
+	return 0;
+}
+
+int __init br_offload_init(void)
+{
+	offload_cache = kmem_cache_create("bridge_offload_cache",
+					  sizeof(struct bridge_flow),
+					  0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!offload_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void br_offload_fini(void)
+{
+	kmem_cache_destroy(offload_cache);
+}
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -268,7 +268,13 @@ struct net_bridge_fdb_entry {
 	unsigned long updated ____cacheline_aligned_in_smp;
 	unsigned long used;
 
-	struct rcu_head rcu;
+	union {
+		struct {
+			struct hlist_head offload_in;
+			struct hlist_head offload_out;
+		};
+		struct rcu_head rcu;
+	};
 };
 
 #define MDB_PG_FLAGS_PERMANENT BIT(0)
@@ -343,6 +349,12 @@ struct net_bridge_mdb_entry {
 	struct rcu_head rcu;
 };
 
+struct net_bridge_port_offload {
+	struct rhashtable rht;
+	struct work_struct gc_work;
+	bool enabled;
+};
+
 struct net_bridge_port {
 	struct net_bridge *br;
 	struct net_device *dev;
@@ -403,6 +415,7 @@ struct net_bridge_port {
 	u16 backup_redirected_cnt;
 
 	struct bridge_stp_xstats stp_xstats;
+	struct net_bridge_port_offload offload;
 };
 
 #define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
@@ -519,6 +532,9 @@ struct net_bridge {
 	struct kobject *ifobj;
 	u32 auto_cnt;
 
+	u32 offload_cache_size;
+	u32 offload_cache_reserved;
+
 #ifdef CONFIG_NET_SWITCHDEV
 	/* Counter used to make sure that hardware domains get unique
 	 * identifiers in case a bridge spans multiple switchdev instances.
@@ -553,6 +569,10 @@ struct br_input_skb_cb {
 #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
 	u8 br_netfilter_broute:1;
 #endif
+	u8 offload:1;
+	u8 input_vlan_present:1;
+	u16 input_vlan_tag;
+	int input_ifindex;
 
 #ifdef CONFIG_NET_SWITCHDEV
 	/* Set if TX data plane offloading is used towards at least one
--- /dev/null
+++ b/net/bridge/br_private_offload.h
@@ -0,0 +1,23 @@
+#ifndef __BR_OFFLOAD_H
+#define __BR_OFFLOAD_H
+
+bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb);
+void br_offload_output(struct sk_buff *skb);
+void br_offload_port_state(struct net_bridge_port *p);
+void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb);
+int br_offload_init(void);
+void br_offload_fini(void);
+int br_offload_set_cache_size(struct net_bridge *br, unsigned long val,
+			      struct netlink_ext_ack *extack);
+int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val,
+				  struct netlink_ext_ack *extack);
+
+static inline void br_offload_skb_disable(struct sk_buff *skb)
+{
+	struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
+
+	if (cb->offload)
+		cb->offload = 0;
+}
+
+#endif
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -12,6 +12,7 @@
 
 #include "br_private.h"
 #include "br_private_stp.h"
+#include "br_private_offload.h"
 
 /* since time values in bpdu are in jiffies and then scaled (1/256)
  * before sending, make sure that is at least one STP tick.
@@ -52,6 +53,8 @@ void br_set_state(struct net_bridge_port
 		  (unsigned int) p->port_no, p->dev->name,
 		  br_port_state_names[p->state]);
 
+	br_offload_port_state(p);
+
 	if (p->br->stp_enabled == BR_KERNEL_STP) {
 		switch (p->state) {
 		case BR_STATE_BLOCKING:
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -18,6 +18,7 @@
 #include <linux/sched/signal.h>
 
 #include "br_private.h"
+#include "br_private_offload.h"
 
 /* IMPORTANT: new bridge options must be added with netlink support only
  * please do not add new sysfs entries
@@ -930,6 +931,38 @@ static ssize_t vlan_stats_per_port_store
 static DEVICE_ATTR_RW(vlan_stats_per_port);
 #endif
 
+static ssize_t offload_cache_size_show(struct device *d,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->offload_cache_size);
+}
+
+static ssize_t offload_cache_size_store(struct device *d,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_offload_set_cache_size);
+}
+static DEVICE_ATTR_RW(offload_cache_size);
+
+static ssize_t offload_cache_reserved_show(struct device *d,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->offload_cache_reserved);
+}
+
+static ssize_t offload_cache_reserved_store(struct device *d,
+					    struct device_attribute *attr,
+					    const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_offload_set_cache_reserved);
+}
+static DEVICE_ATTR_RW(offload_cache_reserved);
+
 static struct attribute *bridge_attrs[] = {
 	&dev_attr_forward_delay.attr,
 	&dev_attr_hello_time.attr,
@@ -984,6 +1017,8 @@ static struct attribute *bridge_attrs[]
 	&dev_attr_vlan_stats_enabled.attr,
 	&dev_attr_vlan_stats_per_port.attr,
 #endif
+	&dev_attr_offload_cache_size.attr,
+	&dev_attr_offload_cache_reserved.attr,
 	NULL
 };
 
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -241,6 +241,7 @@ BRPORT_ATTR_FLAG(broadcast_flood, BR_BCA
 BRPORT_ATTR_FLAG(neigh_suppress, BR_NEIGH_SUPPRESS);
 BRPORT_ATTR_FLAG(isolated, BR_ISOLATED);
 BRPORT_ATTR_FLAG(bpdu_filter, BR_BPDU_FILTER);
+BRPORT_ATTR_FLAG(offload, BR_OFFLOAD);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -295,6 +296,7 @@ static const struct brport_attribute *br
 	&brport_attr_isolated,
 	&brport_attr_bpdu_filter,
 	&brport_attr_backup_port,
+	&brport_attr_offload,
 	NULL
 };
 
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -15,6 +15,7 @@
 
 #include "br_private.h"
 #include "br_private_tunnel.h"
+#include "br_private_offload.h"
 
 static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
 				    const void *ptr)
@@ -180,6 +181,7 @@ void br_handle_ingress_vlan_tunnel(struc
 	skb_dst_drop(skb);
 
 	__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
+	br_offload_skb_disable(skb);
 }
 
 int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
@@ -201,6 +203,7 @@ int br_handle_egress_vlan_tunnel(struct
 	if (err)
 		return err;
 
+	br_offload_skb_disable(skb);
 	tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
 	if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
 		skb_dst_set(skb, &tunnel_dst->dst);
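
For reference, and not part of the patch content above: once the patch is applied, the offload fast path is controlled through the sysfs knobs it adds. A minimal usage sketch, assuming a bridge named br-lan with a member port lan1 (example device names; the defaults set in br_dev_setup() are 128 cached flows and 8 reserved entries):

  # per-bridge flow cache limit and the headroom kept free for new flows
  echo 256 > /sys/class/net/br-lan/bridge/offload_cache_size
  echo 16 > /sys/class/net/br-lan/bridge/offload_cache_reserved
  # BR_OFFLOAD is set by default on new ports; clear the flag to exclude a port
  echo 0 > /sys/class/net/lan1/brport/offload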