linux/generic/hack-5.15: add missing patch headers
target/linux/generic/hack-5.15/600-bridge_offload.patch (openwrt/staging/hauke.git)
From 11c3fae5afa6cac444d12622e2cf5af60a99c1ef Mon Sep 17 00:00:00 2001
From: OpenWrt community <openwrt-devel@lists.openwrt.org>
Date: Wed, 13 Jul 2022 13:43:15 +0200
Subject: [PATCH] net/bridge: add bridge offload

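Add a software fast path for bridge forwarding. After a packet has been
forwarded through the regular bridge path once, its source/destination
MAC (and VLAN) pair is cached in a per-port rhashtable; later packets
matching a cached flow are sent to the destination port directly from
the rx handler. The fast path is enabled by default on new ports
(BR_OFFLOAD port flag) and is only used while the port is in the
forwarding state.

Usage sketch for the sysfs knobs added below (bridge and port names are
examples only):

    # per-port on/off switch for the fast path
    echo 1 > /sys/class/net/eth0/brport/offload
    # per-port flow cache size and the headroom that triggers GC
    echo 256 > /sys/class/net/br0/bridge/offload_cache_size
    echo 16 > /sys/class/net/br0/bridge/offload_cache_reserved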
---
include/linux/if_bridge.h | 1 +
net/bridge/Makefile | 2 +-
net/bridge/br.c | 8 +
net/bridge/br_device.c | 2 +
net/bridge/br_fdb.c | 5 +
net/bridge/br_forward.c | 3 +
net/bridge/br_if.c | 6 +-
net/bridge/br_input.c | 5 +
net/bridge/br_offload.c | 438 ++++++++++++++++++++++++++++++++
net/bridge/br_private.h | 22 +-
net/bridge/br_private_offload.h | 23 ++
net/bridge/br_stp.c | 3 +
net/bridge/br_sysfs_br.c | 35 +++
net/bridge/br_sysfs_if.c | 2 +
net/bridge/br_vlan_tunnel.c | 3 +
15 files changed, 555 insertions(+), 3 deletions(-)
create mode 100644 net/bridge/br_offload.c
create mode 100644 net/bridge/br_private_offload.h

diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 18d3b264b754..944630df0ec3 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -59,6 +59,7 @@ struct br_ip_list {
#define BR_MRP_LOST_IN_CONT BIT(19)
#define BR_TX_FWD_OFFLOAD BIT(20)
#define BR_BPDU_FILTER BIT(21)
+#define BR_OFFLOAD BIT(22)

#define BR_DEFAULT_AGEING_TIME (300 * HZ)

diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index 7fb9a021873b..0ebf3665c216 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -5,7 +5,7 @@

obj-$(CONFIG_BRIDGE) += bridge.o

-bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
+bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o br_offload.o \
br_ioctl.o br_stp.o br_stp_bpdu.o \
br_stp_if.o br_stp_timer.o br_netlink.o \
br_netlink_tunnel.o br_arp_nd_proxy.o
diff --git a/net/bridge/br.c b/net/bridge/br.c
index d3a32c6813e0..42e4d4fec604 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -18,6 +18,7 @@
#include <net/switchdev.h>

#include "br_private.h"
+#include "br_private_offload.h"

/*
* Handle changes in state of network devices enslaved to a bridge.
@@ -381,6 +382,10 @@ static int __init br_init(void)
if (err)
goto err_out;

+ err = br_offload_init();
+ if (err)
+ goto err_out0;
+
err = register_pernet_subsys(&br_net_ops);
if (err)
goto err_out1;
@@ -430,6 +435,8 @@ static int __init br_init(void)
err_out2:
unregister_pernet_subsys(&br_net_ops);
err_out1:
+ br_offload_fini();
+err_out0:
br_fdb_fini();
err_out:
stp_proto_unregister(&br_stp_proto);
@@ -452,6 +459,7 @@ static void __exit br_deinit(void)
#if IS_ENABLED(CONFIG_ATM_LANE)
br_fdb_test_addr_hook = NULL;
#endif
+ br_offload_fini();
br_fdb_fini();
}

diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 8d6bab244c4a..d69d8e9ed7aa 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -524,6 +524,8 @@ void br_dev_setup(struct net_device *dev)
br->bridge_hello_time = br->hello_time = 2 * HZ;
br->bridge_forward_delay = br->forward_delay = 15 * HZ;
br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
+ br->offload_cache_size = 128;
+ br->offload_cache_reserved = 8;
dev->max_mtu = ETH_MAX_MTU;

br_netfilter_rtable_init(br);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 46812b659710..20ea8f75d140 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,6 +23,7 @@
#include <net/switchdev.h>
#include <trace/events/bridge.h>
#include "br_private.h"
+#include "br_private_offload.h"

static const struct rhashtable_params br_fdb_rht_params = {
.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
@@ -518,6 +519,8 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
fdb->key.vlan_id = vid;
fdb->flags = flags;
fdb->updated = fdb->used = jiffies;
+ INIT_HLIST_HEAD(&fdb->offload_in);
+ INIT_HLIST_HEAD(&fdb->offload_out);
if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
&fdb->rhnode,
br_fdb_rht_params)) {
@@ -794,6 +797,8 @@ static void fdb_notify(struct net_bridge *br,
struct sk_buff *skb;
int err = -ENOBUFS;

+ br_offload_fdb_update(fdb);
+
if (swdev_notify)
br_switchdev_fdb_notify(br, fdb, type);

diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 9fe5c888f27d..6d9025106d9d 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -16,6 +16,7 @@
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
+#include "br_private_offload.h"

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
@@ -32,6 +33,8 @@ static inline int should_deliver(const struct net_bridge_port *p,

int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ br_offload_output(skb);
+
skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
goto drop;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 4a02f8bb278a..b1d3295b861c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -25,6 +25,7 @@
#include <net/net_namespace.h>

#include "br_private.h"
+#include "br_private_offload.h"

/*
* Determine initial path cost based on speed.
@@ -428,7 +429,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
p->port_no = index;
- p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_OFFLOAD;
br_init_port(p);
br_set_state(p, BR_STATE_DISABLED);
br_stp_port_timer_init(p);
@@ -771,6 +772,9 @@ void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)

if (mask & BR_NEIGH_SUPPRESS)
br_recalculate_neigh_suppress_enabled(br);
+
+ if (mask & BR_OFFLOAD)
+ br_offload_port_state(p);
}

bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 65416af73714..b0601e6aed8c 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -22,6 +22,7 @@
#include <linux/rculist.h>
#include "br_private.h"
#include "br_private_tunnel.h"
+#include "br_private_offload.h"

static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -171,6 +172,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
dst->used = now;
br_forward(dst->dst, skb, local_rcv, false);
} else {
+ br_offload_skb_disable(skb);
if (!mcast_hit)
br_flood(br, skb, pkt_type, local_rcv, false);
else
@@ -304,6 +306,9 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
memset(skb->cb, 0, sizeof(struct br_input_skb_cb));

p = br_port_get_rcu(skb->dev);
+ if (br_offload_input(p, skb))
+ return RX_HANDLER_CONSUMED;
+
if (p->flags & BR_VLAN_TUNNEL)
br_handle_ingress_vlan_tunnel(skb, p, nbp_vlan_group_rcu(p));

diff --git a/net/bridge/br_offload.c b/net/bridge/br_offload.c
new file mode 100644
index 000000000000..88173ed11093
--- /dev/null
+++ b/net/bridge/br_offload.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include "br_private.h"
+#include "br_private_offload.h"
+
+static DEFINE_SPINLOCK(offload_lock);
+
+struct bridge_flow_key {
+ u8 dest[ETH_ALEN];
+ u8 src[ETH_ALEN];
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ u16 vlan_tag;
+ bool vlan_present;
+#endif
+};
+
+struct bridge_flow {
+ struct net_bridge_port *port;
+ struct rhash_head node;
+ struct bridge_flow_key key;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ bool vlan_out_present;
+ u16 vlan_out;
+#endif
+
+ unsigned long used;
+ struct net_bridge_fdb_entry *fdb_in, *fdb_out;
+ struct hlist_node fdb_list_in, fdb_list_out;
+
+ struct rcu_head rcu;
+};
+
+static const struct rhashtable_params flow_params = {
+ .automatic_shrinking = true,
+ .head_offset = offsetof(struct bridge_flow, node),
+ .key_len = sizeof(struct bridge_flow_key),
+ .key_offset = offsetof(struct bridge_flow, key),
+};
+
+static struct kmem_cache *offload_cache __read_mostly;
+
+static void
+flow_rcu_free(struct rcu_head *head)
+{
+ struct bridge_flow *flow;
+
+ flow = container_of(head, struct bridge_flow, rcu);
+ kmem_cache_free(offload_cache, flow);
+}
+
+static void
+__br_offload_flow_free(struct bridge_flow *flow)
+{
+ flow->used = 0;
+ hlist_del(&flow->fdb_list_in);
+ hlist_del(&flow->fdb_list_out);
+
+ call_rcu(&flow->rcu, flow_rcu_free);
+}
+
+static void
+br_offload_flow_free(struct bridge_flow *flow)
+{
+ if (rhashtable_remove_fast(&flow->port->offload.rht, &flow->node,
+ flow_params) != 0)
+ return;
+
+ __br_offload_flow_free(flow);
+}
+
+static bool
+br_offload_flow_fdb_refresh_time(struct bridge_flow *flow,
+ struct net_bridge_fdb_entry *fdb)
+{
+ if (!time_after(flow->used, fdb->updated))
+ return false;
+
+ fdb->updated = flow->used;
+
+ return true;
+}
+
+
+static void
+br_offload_flow_refresh_time(struct bridge_flow *flow)
+{
+ br_offload_flow_fdb_refresh_time(flow, flow->fdb_in);
+ br_offload_flow_fdb_refresh_time(flow, flow->fdb_out);
+}
+
+static void
+br_offload_destroy_cb(void *ptr, void *arg)
+{
+ struct bridge_flow *flow = ptr;
+
+ __br_offload_flow_free(flow);
+}
+
+static bool
+br_offload_need_gc(struct net_bridge_port *p)
+{
+ return (atomic_read(&p->offload.rht.nelems) +
+ p->br->offload_cache_reserved) >= p->br->offload_cache_size;
+}
+
+static void
+br_offload_gc_work(struct work_struct *work)
+{
+ struct rhashtable_iter hti;
+ struct net_bridge_port *p;
+ struct bridge_flow *gc_flow = NULL;
+ struct bridge_flow *flow;
+ unsigned long gc_used;
+
+ p = container_of(work, struct net_bridge_port, offload.gc_work);
+
+ if (!br_offload_need_gc(p))
+ return;
+
+ rhashtable_walk_enter(&p->offload.rht, &hti);
+ rhashtable_walk_start(&hti);
+ while ((flow = rhashtable_walk_next(&hti)) != NULL) {
+ unsigned long used;
+
+ if (IS_ERR(flow))
+ continue;
+
+ used = READ_ONCE(flow->used);
+ if (!used)
+ continue;
+
+ if (gc_flow && !time_before(used, gc_used))
+ continue;
+
+ gc_flow = flow;
+ gc_used = used;
+ }
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+
+ if (!gc_flow)
+ return;
+
+ spin_lock_bh(&offload_lock);
+ if (br_offload_need_gc(p) && gc_flow &&
+ gc_flow->used == gc_used)
+ br_offload_flow_free(gc_flow);
+ if (p->offload.enabled && br_offload_need_gc(p))
+ queue_work(system_long_wq, work);
+ spin_unlock_bh(&offload_lock);
+
+}
+
+void br_offload_port_state(struct net_bridge_port *p)
+{
+ struct net_bridge_port_offload *o = &p->offload;
+ bool enabled = true;
+ bool flush = false;
+
+ if (p->state != BR_STATE_FORWARDING ||
+ !(p->flags & BR_OFFLOAD))
+ enabled = false;
+
+ spin_lock_bh(&offload_lock);
+ if (o->enabled == enabled)
+ goto out;
+
+ if (enabled) {
+ if (!o->gc_work.func)
+ INIT_WORK(&o->gc_work, br_offload_gc_work);
+ rhashtable_init(&o->rht, &flow_params);
+ } else {
+ flush = true;
+ rhashtable_free_and_destroy(&o->rht, br_offload_destroy_cb, o);
+ }
+
+ o->enabled = enabled;
+
+out:
+ spin_unlock_bh(&offload_lock);
+
+ if (flush)
+ flush_work(&o->gc_work);
+}
+
+void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb)
+{
+ struct bridge_flow *f;
+ struct hlist_node *tmp;
+
+ spin_lock_bh(&offload_lock);
+
+ hlist_for_each_entry_safe(f, tmp, &fdb->offload_in, fdb_list_in)
+ br_offload_flow_free(f);
+
+ hlist_for_each_entry_safe(f, tmp, &fdb->offload_out, fdb_list_out)
+ br_offload_flow_free(f);
+
+ spin_unlock_bh(&offload_lock);
+}
+
+static void
+br_offload_prepare_key(struct net_bridge_port *p, struct bridge_flow_key *key,
+ struct sk_buff *skb)
+{
+ memset(key, 0, sizeof(*key));
+ memcpy(key, eth_hdr(skb), 2 * ETH_ALEN);
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ if (!br_opt_get(p->br, BROPT_VLAN_ENABLED))
+ return;
+
+ if (!skb_vlan_tag_present(skb) || skb->vlan_proto != p->br->vlan_proto)
+ return;
+
+ key->vlan_present = true;
+ key->vlan_tag = skb_vlan_tag_get_id(skb);
+#endif
+}
+
+void br_offload_output(struct sk_buff *skb)
+{
+ struct net_bridge_port_offload *o;
+ struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
+ struct net_bridge_port *p, *inp;
+ struct net_device *dev;
+ struct net_bridge_fdb_entry *fdb_in, *fdb_out;
+ struct net_bridge_vlan_group *vg;
+ struct bridge_flow_key key;
+ struct bridge_flow *flow;
+ u16 vlan;
+
+ if (!cb->offload)
+ return;
+
+ rcu_read_lock();
+
+ p = br_port_get_rcu(skb->dev);
+ if (!p)
+ goto out;
+
+ o = &p->offload;
+ if (!o->enabled)
+ goto out;
+
+ if (atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size)
+ goto out;
+
+ dev = dev_get_by_index_rcu(dev_net(p->br->dev), cb->input_ifindex);
+ if (!dev)
+ goto out;
+
+ inp = br_port_get_rcu(dev);
+ if (!inp)
+ goto out;
+
+ vg = nbp_vlan_group_rcu(inp);
+ vlan = cb->input_vlan_present ? cb->input_vlan_tag : br_get_pvid(vg);
+ fdb_in = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_source, vlan);
+ if (!fdb_in || !fdb_in->dst)
+ goto out;
+
+ vg = nbp_vlan_group_rcu(p);
+ vlan = skb_vlan_tag_present(skb) ? skb_vlan_tag_get_id(skb) : br_get_pvid(vg);
+ fdb_out = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_dest, vlan);
+ if (!fdb_out || !fdb_out->dst)
+ goto out;
+
+ br_offload_prepare_key(p, &key, skb);
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ key.vlan_present = cb->input_vlan_present;
+ key.vlan_tag = cb->input_vlan_tag;
+#endif
+
+ flow = kmem_cache_alloc(offload_cache, GFP_ATOMIC);
+ flow->port = inp;
+ memcpy(&flow->key, &key, sizeof(key));
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ flow->vlan_out_present = skb_vlan_tag_present(skb);
+ flow->vlan_out = skb_vlan_tag_get(skb);
+#endif
+
+ flow->fdb_in = fdb_in;
+ flow->fdb_out = fdb_out;
+ flow->used = jiffies;
+
+ spin_lock_bh(&offload_lock);
+ if (!o->enabled ||
+ atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size ||
+ rhashtable_insert_fast(&inp->offload.rht, &flow->node, flow_params)) {
+ kmem_cache_free(offload_cache, flow);
+ goto out_unlock;
+ }
+
+ hlist_add_head(&flow->fdb_list_in, &fdb_in->offload_in);
+ hlist_add_head(&flow->fdb_list_out, &fdb_out->offload_out);
+
+ if (br_offload_need_gc(p))
+ queue_work(system_long_wq, &p->offload.gc_work);
+
+out_unlock:
+ spin_unlock_bh(&offload_lock);
+
+out:
+ rcu_read_unlock();
+}
+
+bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb)
+{
+ struct net_bridge_port_offload *o = &p->offload;
+ struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
+ struct bridge_flow_key key;
+ struct net_bridge_port *dst;
+ struct bridge_flow *flow;
+ unsigned long now = jiffies;
+ bool ret = false;
+
+ if (skb->len < sizeof(key))
+ return false;
+
+ if (!o->enabled)
+ return false;
+
+ if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+ return false;
+
+ br_offload_prepare_key(p, &key, skb);
+
+ rcu_read_lock();
+ flow = rhashtable_lookup(&o->rht, &key, flow_params);
+ if (!flow) {
+ cb->offload = 1;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ cb->input_vlan_present = key.vlan_present != 0;
+ cb->input_vlan_tag = key.vlan_tag;
+#endif
+ cb->input_ifindex = p->dev->ifindex;
+ goto out;
+ }
+
+ if (flow->fdb_in->dst != p)
+ goto out;
+
+ dst = flow->fdb_out->dst;
+ if (!dst)
+ goto out;
+
+ ret = true;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ if (!flow->vlan_out_present && key.vlan_present) {
+ __vlan_hwaccel_clear_tag(skb);
+ } else if (flow->vlan_out_present) {
+ if (skb_vlan_tag_present(skb) &&
+ skb->vlan_proto != p->br->vlan_proto) {
+ /* Protocol-mismatch, empty out vlan_tci for new tag */
+ skb_push(skb, ETH_HLEN);
+ skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+ skb_vlan_tag_get(skb));
+ if (unlikely(!skb))
+ goto out;
+
+ skb_pull(skb, ETH_HLEN);
+ skb_reset_mac_len(skb);
+ }
+
+ __vlan_hwaccel_put_tag(skb, p->br->vlan_proto,
+ flow->vlan_out);
+ }
+#endif
+
+ skb->dev = dst->dev;
+ skb_push(skb, ETH_HLEN);
+
+ if (skb_warn_if_lro(skb) || !is_skb_forwardable(skb->dev, skb)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ if (now - flow->used >= HZ) {
+ flow->used = now;
+ br_offload_flow_refresh_time(flow);
+ }
+
+ skb_forward_csum(skb);
+ dev_queue_xmit(skb);
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static void
+br_offload_check_gc(struct net_bridge *br)
+{
+ struct net_bridge_port *p;
+
+ spin_lock_bh(&br->lock);
+ list_for_each_entry(p, &br->port_list, list)
+ if (br_offload_need_gc(p))
+ queue_work(system_long_wq, &p->offload.gc_work);
+ spin_unlock_bh(&br->lock);
+}
+
+
+int br_offload_set_cache_size(struct net_bridge *br, unsigned long val,
+ struct netlink_ext_ack *extack)
+{
+ br->offload_cache_size = val;
+ br_offload_check_gc(br);
+
+ return 0;
+}
+
+int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val,
+ struct netlink_ext_ack *extack)
+{
+ br->offload_cache_reserved = val;
+ br_offload_check_gc(br);
+
+ return 0;
+}
+
+int __init br_offload_init(void)
+{
+ offload_cache = kmem_cache_create("bridge_offload_cache",
+ sizeof(struct bridge_flow),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!offload_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void br_offload_fini(void)
+{
+ kmem_cache_destroy(offload_cache);
+}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index bd218c2b2cd9..951ba1d993ca 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -268,7 +268,13 @@ struct net_bridge_fdb_entry {
unsigned long updated ____cacheline_aligned_in_smp;
unsigned long used;

- struct rcu_head rcu;
+ union {
+ struct {
+ struct hlist_head offload_in;
+ struct hlist_head offload_out;
+ };
+ struct rcu_head rcu;
+ };
};

#define MDB_PG_FLAGS_PERMANENT BIT(0)
@@ -343,6 +349,12 @@ struct net_bridge_mdb_entry {
struct rcu_head rcu;
};

+struct net_bridge_port_offload {
+ struct rhashtable rht;
+ struct work_struct gc_work;
+ bool enabled;
+};
+
struct net_bridge_port {
struct net_bridge *br;
struct net_device *dev;
@@ -403,6 +415,7 @@ struct net_bridge_port {
u16 backup_redirected_cnt;

struct bridge_stp_xstats stp_xstats;
+ struct net_bridge_port_offload offload;
};

#define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
@@ -519,6 +532,9 @@ struct net_bridge {
struct kobject *ifobj;
u32 auto_cnt;

+ u32 offload_cache_size;
+ u32 offload_cache_reserved;
+
#ifdef CONFIG_NET_SWITCHDEV
/* Counter used to make sure that hardware domains get unique
* identifiers in case a bridge spans multiple switchdev instances.
@@ -553,6 +569,10 @@ struct br_input_skb_cb {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
u8 br_netfilter_broute:1;
#endif
+ u8 offload:1;
+ u8 input_vlan_present:1;
+ u16 input_vlan_tag;
+ int input_ifindex;

#ifdef CONFIG_NET_SWITCHDEV
/* Set if TX data plane offloading is used towards at least one
diff --git a/net/bridge/br_private_offload.h b/net/bridge/br_private_offload.h
new file mode 100644
index 000000000000..97c13af2866b
--- /dev/null
+++ b/net/bridge/br_private_offload.h
@@ -0,0 +1,23 @@
+#ifndef __BR_OFFLOAD_H
+#define __BR_OFFLOAD_H
+
+bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb);
+void br_offload_output(struct sk_buff *skb);
+void br_offload_port_state(struct net_bridge_port *p);
+void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb);
+int br_offload_init(void);
+void br_offload_fini(void);
+int br_offload_set_cache_size(struct net_bridge *br, unsigned long val,
+ struct netlink_ext_ack *extack);
+int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val,
+ struct netlink_ext_ack *extack);
+
+static inline void br_offload_skb_disable(struct sk_buff *skb)
+{
+ struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
+
+ if (cb->offload)
+ cb->offload = 0;
+}
+
+#endif
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 1d80f34a139c..b57788b53d24 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -12,6 +12,7 @@

#include "br_private.h"
#include "br_private_stp.h"
+#include "br_private_offload.h"

/* since time values in bpdu are in jiffies and then scaled (1/256)
* before sending, make sure that is at least one STP tick.
@@ -52,6 +53,8 @@ void br_set_state(struct net_bridge_port *p, unsigned int state)
(unsigned int) p->port_no, p->dev->name,
br_port_state_names[p->state]);

+ br_offload_port_state(p);
+
if (p->br->stp_enabled == BR_KERNEL_STP) {
switch (p->state) {
case BR_STATE_BLOCKING:
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 7b0c19772111..814cbfb77d25 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -18,6 +18,7 @@
#include <linux/sched/signal.h>

#include "br_private.h"
+#include "br_private_offload.h"

/* IMPORTANT: new bridge options must be added with netlink support only
* please do not add new sysfs entries
@@ -930,6 +931,38 @@ static ssize_t vlan_stats_per_port_store(struct device *d,
static DEVICE_ATTR_RW(vlan_stats_per_port);
#endif

+static ssize_t offload_cache_size_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%u\n", br->offload_cache_size);
+}
+
+static ssize_t offload_cache_size_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, br_offload_set_cache_size);
+}
+static DEVICE_ATTR_RW(offload_cache_size);
+
+static ssize_t offload_cache_reserved_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%u\n", br->offload_cache_reserved);
+}
+
+static ssize_t offload_cache_reserved_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, br_offload_set_cache_reserved);
+}
+static DEVICE_ATTR_RW(offload_cache_reserved);
+
static struct attribute *bridge_attrs[] = {
&dev_attr_forward_delay.attr,
&dev_attr_hello_time.attr,
@@ -984,6 +1017,8 @@ static struct attribute *bridge_attrs[] = {
&dev_attr_vlan_stats_enabled.attr,
&dev_attr_vlan_stats_per_port.attr,
#endif
+ &dev_attr_offload_cache_size.attr,
+ &dev_attr_offload_cache_reserved.attr,
NULL
};

diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 9ee9c60738e2..2b44e5fb19e4 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -241,6 +241,7 @@ BRPORT_ATTR_FLAG(broadcast_flood, BR_BCAST_FLOOD);
BRPORT_ATTR_FLAG(neigh_suppress, BR_NEIGH_SUPPRESS);
BRPORT_ATTR_FLAG(isolated, BR_ISOLATED);
BRPORT_ATTR_FLAG(bpdu_filter, BR_BPDU_FILTER);
+BRPORT_ATTR_FLAG(offload, BR_OFFLOAD);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -295,6 +296,7 @@ static const struct brport_attribute *brport_attrs[] = {
&brport_attr_isolated,
&brport_attr_bpdu_filter,
&brport_attr_backup_port,
+ &brport_attr_offload,
NULL
};

diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
index 6399a8a69d07..ffc65dc4eea8 100644
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -15,6 +15,7 @@

#include "br_private.h"
#include "br_private_tunnel.h"
+#include "br_private_offload.h"

static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
@@ -180,6 +181,7 @@ void br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
skb_dst_drop(skb);

__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
+ br_offload_skb_disable(skb);
}

int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
@@ -201,6 +203,7 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
if (err)
return err;

+ br_offload_skb_disable(skb);
tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
skb_dst_set(skb, &tunnel_dst->dst);
--
