From afa3ab54c03d5126b14651f367b38165fab5b3cc Mon Sep 17 00:00:00 2001
From: Birger Koblitz <git@birger-koblitz.de>
Date: Tue, 18 Jan 2022 17:18:43 +0100
Subject: [PATCH] realtek: Backport LAG offloading for DSA

Adds the DSA API for link aggregation (LAG) offload as found in
Linux 5.12 so that we can implement it in our driver.
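
A driver opts in by setting ds->num_lag_ids and implementing the new
port_lag_join/port_lag_leave/port_lag_change ops. As a rough sketch of
the driver side (my_port_lag_join() and my_hw_trunk_add() are
hypothetical names, not part of this patch):

  static int my_port_lag_join(struct dsa_switch *ds, int port,
                              struct net_device *lag,
                              struct netdev_lag_upper_info *info)
  {
          /* Offload only hash-based TX balancing; returning
           * -EOPNOTSUPP here makes dsa_slave_changeupper() fall
           * back to a software LAG on this port.
           */
          if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
                  return -EOPNOTSUPP;

          /* dsa_lag_id() resolves the LAG netdev to the linear
           * ID that DSA maintains when num_lag_ids is set.
           */
          return my_hw_trunk_add(ds->priv, dsa_lag_id(ds->dst, lag),
                                 port);
  }

port_lag_leave and port_lag_change would tear down the trunk and
update its TX set in the same way.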

Submitted-by: Sebastian Gottschall <s.gottschall@dd-wrt.com>
Submitted-by: Birger Koblitz <git@birger-koblitz.de>
---
 drivers/net/bonding/bond_main.c |  2 ++
 include/net/dsa.h               | 79 ++++++++++++++++-
 net/dsa/dsa.c                   | 12 +++-
 net/dsa/dsa2.c                  | 88 +++++++++++++++++++
 net/dsa/dsa_priv.h              | 74 ++++++++++++++
 net/dsa/port.c                  | 92 ++++++++++++++++++++
 net/dsa/slave.c                 | 88 ++++++++++++++++---
 net/dsa/switch.c                | 49 ++++++++++
 net/dsa/tag_dsa.c               | 14 +++++-
 9 files changed, 471 insertions(+), 26 deletions(-)

--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2045,6 +2045,8 @@ int bond_enslave(struct net_device *bond
 		goto err_unregister;
 	}
 
+	bond_lower_state_changed(new_slave);
+
 	res = bond_sysfs_slave_add(new_slave);
 	if (res) {
 		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -149,8 +149,41 @@ struct dsa_switch_tree {
 
 	/* List of DSA links composing the routing table */
 	struct list_head rtable;
+
+	/* Maps offloaded LAG netdevs to a zero-based linear ID for
+	 * drivers that need it.
+	 */
+	struct net_device **lags;
+	unsigned int lags_len;
 };
 
+#define dsa_lags_foreach_id(_id, _dst)				\
+	for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++)	\
+		if ((_dst)->lags[(_id)])
+
+#define dsa_lag_foreach_port(_dp, _dst, _lag)			\
+	list_for_each_entry((_dp), &(_dst)->ports, list)	\
+		if ((_dp)->lag_dev == (_lag))
+
+static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
+					     unsigned int id)
+{
+	return dst->lags[id];
+}
+
+static inline int dsa_lag_id(struct dsa_switch_tree *dst,
+			     struct net_device *lag)
+{
+	unsigned int id;
+
+	dsa_lags_foreach_id(id, dst) {
+		if (dsa_lag_dev(dst, id) == lag)
+			return id;
+	}
+
+	return -ENODEV;
+}
+
 /* TC matchall action types */
 enum dsa_port_mall_action_type {
 	DSA_PORT_MALL_MIRROR,
@@ -220,6 +253,8 @@ struct dsa_port {
 	bool			devlink_port_setup;
 	struct phylink		*pl;
 	struct phylink_config	pl_config;
+	struct net_device	*lag_dev;
+	bool			lag_tx_enabled;
 
 	struct list_head list;
 
@@ -340,6 +375,14 @@ struct dsa_switch {
 	 */
 	bool			mtu_enforcement_ingress;
 
+	/* Drivers that benefit from having an ID associated with each
+	 * offloaded LAG should set this to the maximum number of
+	 * supported IDs. DSA will then maintain a mapping of _at
+	 * least_ this many IDs, accessible to drivers via
+	 * dsa_lag_id().
+	 */
+	unsigned int		num_lag_ids;
+
 	size_t num_ports;
 };
 
@@ -432,6 +475,18 @@ static inline bool dsa_port_is_vlan_filt
 	return dp->vlan_filtering;
 }
 
+static inline
+struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
+{
+	if (!dp->bridge_dev)
+		return NULL;
+
+	if (dp->lag_dev)
+		return dp->lag_dev;
+
+	return dp->slave;
+}
+
 typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
 			      bool is_static, void *data);
 struct dsa_switch_ops {
@@ -629,6 +684,13 @@ struct dsa_switch_ops {
 	void	(*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
 					  int sw_index, int port,
 					  struct net_device *br);
+	int	(*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
+					int port);
+	int	(*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
+				      int port, struct net_device *lag,
+				      struct netdev_lag_upper_info *info);
+	int	(*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
+				       int port, struct net_device *lag);
 
 	/*
 	 * PTP functionality
@@ -660,6 +722,16 @@ struct dsa_switch_ops {
 	int	(*port_change_mtu)(struct dsa_switch *ds, int port,
 				   int new_mtu);
 	int	(*port_max_mtu)(struct dsa_switch *ds, int port);
+
+	/*
+	 * LAG integration
+	 */
+	int	(*port_lag_change)(struct dsa_switch *ds, int port);
+	int	(*port_lag_join)(struct dsa_switch *ds, int port,
+				 struct net_device *lag,
+				 struct netdev_lag_upper_info *info);
+	int	(*port_lag_leave)(struct dsa_switch *ds, int port,
+				  struct net_device *lag);
 };
 
 #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes)		\
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -220,11 +220,21 @@ static int dsa_switch_rcv(struct sk_buff
 	}
 
 	skb = nskb;
-	p = netdev_priv(skb->dev);
 	skb_push(skb, ETH_HLEN);
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);
 
+	if (unlikely(!dsa_slave_dev_check(skb->dev))) {
+		/* Packet is to be injected directly on an upper
+		 * device, e.g. a team/bond, so skip all DSA-port
+		 * specific actions.
+		 */
+		netif_rx(skb);
+		return 0;
+	}
+
+	p = netdev_priv(skb->dev);
+
 	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
 		nskb = dsa_untag_bridge_pvid(skb);
 		if (!nskb) {
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -21,6 +21,65 @@
 static DEFINE_MUTEX(dsa2_mutex);
 LIST_HEAD(dsa_tree_list);
 
+/**
+ * dsa_lag_map() - Map LAG netdev to a linear LAG ID
+ * @dst: Tree in which to record the mapping.
+ * @lag: Netdev that is to be mapped to an ID.
+ *
+ * dsa_lag_id/dsa_lag_dev can then be used to translate between the
+ * two spaces. The size of the mapping space is determined by the
+ * driver by setting ds->num_lag_ids. It is perfectly legal to leave
+ * it unset if it is not needed, in which case these functions become
+ * no-ops.
+ */
+void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
+{
+	unsigned int id;
+
+	if (dsa_lag_id(dst, lag) >= 0)
+		/* Already mapped */
+		return;
+
+	for (id = 0; id < dst->lags_len; id++) {
+		if (!dsa_lag_dev(dst, id)) {
+			dst->lags[id] = lag;
+			return;
+		}
+	}
+
+	/* No IDs left, which is OK. Some drivers do not need it. The
+	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
+	 * returns an error for this device when joining the LAG. The
+	 * driver can then return -EOPNOTSUPP back to DSA, which will
+	 * fall back to a software LAG.
+	 */
+}
+
+/**
+ * dsa_lag_unmap() - Remove a LAG ID mapping
+ * @dst: Tree in which the mapping is recorded.
+ * @lag: Netdev that was mapped.
+ *
+ * As there may be multiple users of the mapping, it is only removed
+ * if there are no other references to it.
+ */
+void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
+{
+	struct dsa_port *dp;
+	unsigned int id;
+
+	dsa_lag_foreach_port(dp, dst, lag)
+		/* There are remaining users of this mapping */
+		return;
+
+	dsa_lags_foreach_id(id, dst) {
+		if (dsa_lag_dev(dst, id) == lag) {
+			dst->lags[id] = NULL;
+			break;
+		}
+	}
+}
+
 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
 {
 	struct dsa_switch_tree *dst;
@@ -597,6 +656,32 @@ static void dsa_tree_teardown_master(str
 		dsa_master_teardown(dp->master);
 }
 
+static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
+{
+	unsigned int len = 0;
+	struct dsa_port *dp;
+
+	list_for_each_entry(dp, &dst->ports, list) {
+		if (dp->ds->num_lag_ids > len)
+			len = dp->ds->num_lag_ids;
+	}
+
+	if (!len)
+		return 0;
+
+	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
+	if (!dst->lags)
+		return -ENOMEM;
+
+	dst->lags_len = len;
+	return 0;
+}
+
+static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
+{
+	kfree(dst->lags);
+}
+
 static int dsa_tree_setup(struct dsa_switch_tree *dst)
 {
 	bool complete;
@@ -624,12 +709,18 @@ static int dsa_tree_setup(struct dsa_swi
 	if (err)
 		goto teardown_switches;
 
+	err = dsa_tree_setup_lags(dst);
+	if (err)
+		goto teardown_master;
+
 	dst->setup = true;
 
 	pr_info("DSA: tree %d setup\n", dst->index);
 
 	return 0;
 
+teardown_master:
+	dsa_tree_teardown_master(dst);
 teardown_switches:
 	dsa_tree_teardown_switches(dst);
teardown_default_cpu:
@@ -645,6 +736,8 @@ static void dsa_tree_teardown(struct dsa
 	if (!dst->setup)
 		return;
 
+	dsa_tree_teardown_lags(dst);
+
 	dsa_tree_teardown_master(dst);
 
 	dsa_tree_teardown_switches(dst);
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -20,6 +20,9 @@ enum {
 	DSA_NOTIFIER_BRIDGE_LEAVE,
 	DSA_NOTIFIER_FDB_ADD,
 	DSA_NOTIFIER_FDB_DEL,
+	DSA_NOTIFIER_LAG_CHANGE,
+	DSA_NOTIFIER_LAG_JOIN,
+	DSA_NOTIFIER_LAG_LEAVE,
 	DSA_NOTIFIER_MDB_ADD,
 	DSA_NOTIFIER_MDB_DEL,
 	DSA_NOTIFIER_VLAN_ADD,
@@ -57,6 +60,15 @@ struct dsa_notifier_mdb_info {
 	int port;
 };
 
+/* DSA_NOTIFIER_LAG_* */
+struct dsa_notifier_lag_info {
+	struct net_device *lag;
+	int sw_index;
+	int port;
+
+	struct netdev_lag_upper_info *info;
+};
+
 /* DSA_NOTIFIER_VLAN_* */
 struct dsa_notifier_vlan_info {
 	const struct switchdev_obj_port_vlan *vlan;
@@ -149,6 +161,11 @@ void dsa_port_disable_rt(struct dsa_port
 void dsa_port_disable(struct dsa_port *dp);
 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
+int dsa_port_lag_change(struct dsa_port *dp,
+			struct netdev_lag_lower_state_info *linfo);
+int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
+		      struct netdev_lag_upper_info *uinfo);
+void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
 			    struct switchdev_trans *trans);
 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
@@ -181,6 +198,71 @@ int dsa_port_link_register_of(struct dsa
 void dsa_port_link_unregister_of(struct dsa_port *dp);
 extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
 
+static inline bool dsa_port_offloads_netdev(struct dsa_port *dp,
+					    struct net_device *dev)
+{
+	/* Switchdev offloading can be configured on: */
+
+	if (dev == dp->slave)
+		/* DSA ports directly connected to a bridge, and event
+		 * was emitted for the ports themselves.
+		 */
+		return true;
+
+	if (dp->bridge_dev == dev)
+		/* DSA ports connected to a bridge, and event was emitted
+		 * for the bridge.
+		 */
+		return true;
+
+	if (dp->lag_dev == dev)
+		/* DSA ports connected to a bridge via a LAG */
+		return true;
+
+	return false;
+}
+
+static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
+						 struct net_device *dev)
+{
+	return dsa_port_to_bridge_port(dp) == dev;
+}
+
+static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
+					    struct net_device *bridge_dev)
+{
+	/* DSA ports connected to a bridge, and event was emitted
+	 * for the bridge.
+	 */
+	return dp->bridge_dev == bridge_dev;
+}
+
+/* Returns true if any port of this tree offloads the given net_device */
+static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
+						 struct net_device *dev)
+{
+	struct dsa_port *dp;
+
+	list_for_each_entry(dp, &dst->ports, list)
+		if (dsa_port_offloads_bridge_port(dp, dev))
+			return true;
+
+	return false;
+}
+
+/* Returns true if any port of this tree offloads the given net_device */
+static inline bool dsa_tree_offloads_netdev(struct dsa_switch_tree *dst,
+					    struct net_device *dev)
+{
+	struct dsa_port *dp;
+
+	list_for_each_entry(dp, &dst->ports, list)
+		if (dsa_port_offloads_netdev(dp, dev))
+			return true;
+
+	return false;
+}
+
 /* slave.c */
 extern const struct dsa_device_ops notag_netdev_ops;
 void dsa_slave_mii_bus_init(struct dsa_switch *ds);
@@ -285,6 +367,9 @@ int dsa_switch_register_notifier(struct
 void dsa_switch_unregister_notifier(struct dsa_switch *ds);
 
 /* dsa2.c */
+void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
+void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
+
 extern struct list_head dsa_tree_list;
 
 #endif
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -193,6 +193,99 @@ void dsa_port_bridge_leave(struct dsa_po
 	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
 }
 
+int dsa_port_lag_change(struct dsa_port *dp,
+			struct netdev_lag_lower_state_info *linfo)
+{
+	struct dsa_notifier_lag_info info = {
+		.sw_index = dp->ds->index,
+		.port = dp->index,
+	};
+	bool tx_enabled;
+
+	if (!dp->lag_dev)
+		return 0;
+
+	/* On statically configured aggregates (e.g. loadbalance
+	 * without LACP) ports will always be tx_enabled, even if the
+	 * link is down. Thus we require both link_up and tx_enabled
+	 * in order to include it in the tx set.
+	 */
+	tx_enabled = linfo->link_up && linfo->tx_enabled;
+
+	if (tx_enabled == dp->lag_tx_enabled)
+		return 0;
+
+	dp->lag_tx_enabled = tx_enabled;
+
+	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
+}
+
+int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
+		      struct netdev_lag_upper_info *uinfo)
+{
+	struct dsa_notifier_lag_info info = {
+		.sw_index = dp->ds->index,
+		.port = dp->index,
+		.lag = lag,
+		.info = uinfo,
+	};
+	struct net_device *bridge_dev;
+	int err;
+
+	dsa_lag_map(dp->ds->dst, lag);
+	dp->lag_dev = lag;
+
+	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
+	if (err)
+		goto err_lag_join;
+
+	bridge_dev = netdev_master_upper_dev_get(lag);
+	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
+		return 0;
+
+	err = dsa_port_bridge_join(dp, bridge_dev);
+	if (err)
+		goto err_bridge_join;
+
+	return 0;
+
+err_bridge_join:
+	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
+err_lag_join:
+	dp->lag_dev = NULL;
+	dsa_lag_unmap(dp->ds->dst, lag);
+	return err;
+}
+
+void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
+{
+	struct dsa_notifier_lag_info info = {
+		.sw_index = dp->ds->index,
+		.port = dp->index,
+		.lag = lag,
+	};
+	int err;
+
+	if (!dp->lag_dev)
+		return;
+
+	/* Port might have been part of a LAG that in turn was
+	 * attached to a bridge.
+	 */
+	if (dp->bridge_dev)
+		dsa_port_bridge_leave(dp, dp->bridge_dev);
+
+	dp->lag_tx_enabled = false;
+	dp->lag_dev = NULL;
+
+	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
+	if (err)
+		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
+		       err);
+
+	dsa_lag_unmap(dp->ds->dst, lag);
+}
+
 /* Must be called under rcu_read_lock() */
 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
 					      bool vlan_filtering)
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -337,9 +337,6 @@ static int dsa_slave_vlan_add(struct net
 	struct switchdev_obj_port_vlan vlan;
 	int vid, err;
 
-	if (obj->orig_dev != dev)
-		return -EOPNOTSUPP;
-
 	if (dsa_port_skip_vlan_configuration(dp))
 		return 0;
 
@@ -394,11 +391,13 @@ static int dsa_slave_port_obj_add(struct
 
 	switch (obj->id) {
 	case SWITCHDEV_OBJ_ID_PORT_MDB:
-		if (obj->orig_dev != dev)
+		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
 			return -EOPNOTSUPP;
 		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
 		break;
 	case SWITCHDEV_OBJ_ID_HOST_MDB:
+		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+			return -EOPNOTSUPP;
 		/* DSA can directly translate this to a normal MDB add,
 		 * but on the CPU port.
 		 */
@@ -406,6 +405,9 @@ static int dsa_slave_port_obj_add(struct
 				       trans);
 		break;
 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
+			return -EOPNOTSUPP;
+
 		err = dsa_slave_vlan_add(dev, obj, trans);
 		break;
 	default:
@@ -424,9 +426,6 @@ static int dsa_slave_vlan_del(struct net
 	struct switchdev_obj_port_vlan *vlan;
 	int vid, err;
 
-	if (obj->orig_dev != dev)
-		return -EOPNOTSUPP;
-
 	if (dsa_port_skip_vlan_configuration(dp))
 		return 0;
 
@@ -453,17 +452,22 @@ static int dsa_slave_port_obj_del(struct
 
 	switch (obj->id) {
 	case SWITCHDEV_OBJ_ID_PORT_MDB:
-		if (obj->orig_dev != dev)
+		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
 			return -EOPNOTSUPP;
 		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
 		break;
 	case SWITCHDEV_OBJ_ID_HOST_MDB:
+		if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+			return -EOPNOTSUPP;
 		/* DSA can directly translate this to a normal MDB add,
 		 * but on the CPU port.
 		 */
 		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
 		break;
 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
+			return -EOPNOTSUPP;
+
 		err = dsa_slave_vlan_del(dev, obj);
 		break;
 	default:
@@ -1993,6 +1997,46 @@ static int dsa_slave_changeupper(struct
 			dsa_port_bridge_leave(dp, info->upper_dev);
 			err = NOTIFY_OK;
 		}
+	} else if (netif_is_lag_master(info->upper_dev)) {
+		if (info->linking) {
+			err = dsa_port_lag_join(dp, info->upper_dev,
+						info->upper_info);
+			if (err == -EOPNOTSUPP) {
+				NL_SET_ERR_MSG_MOD(info->info.extack,
+						   "Offloading not supported");
+				err = 0;
+			}
+			err = notifier_from_errno(err);
+		} else {
+			dsa_port_lag_leave(dp, info->upper_dev);
+			err = NOTIFY_OK;
+		}
+	}
+
+	return err;
+}
+
+static int
+dsa_slave_lag_changeupper(struct net_device *dev,
+			  struct netdev_notifier_changeupper_info *info)
+{
+	struct net_device *lower;
+	struct list_head *iter;
+	int err = NOTIFY_DONE;
+	struct dsa_port *dp;
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		if (!dsa_slave_dev_check(lower))
+			continue;
+
+		dp = dsa_slave_to_port(lower);
+		if (!dp->lag_dev)
+			/* Software LAG */
+			continue;
+
+		err = dsa_slave_changeupper(lower, info);
+		if (notifier_to_errno(err))
+			break;
 	}
 
 	return err;
@@ -2078,10 +2122,26 @@ static int dsa_slave_netdevice_event(str
 		break;
 	}
 	case NETDEV_CHANGEUPPER:
+		if (dsa_slave_dev_check(dev))
+			return dsa_slave_changeupper(dev, ptr);
+
+		if (netif_is_lag_master(dev))
+			return dsa_slave_lag_changeupper(dev, ptr);
+
+		break;
+	case NETDEV_CHANGELOWERSTATE: {
+		struct netdev_notifier_changelowerstate_info *info = ptr;
+		struct dsa_port *dp;
+		int err;
+
 		if (!dsa_slave_dev_check(dev))
-			return NOTIFY_DONE;
+			break;
 
-		return dsa_slave_changeupper(dev, ptr);
+		dp = dsa_slave_to_port(dev);
+
+		err = dsa_port_lag_change(dp, info->lower_state_info);
+		return notifier_from_errno(err);
+	}
 	}
 
 	return NOTIFY_DONE;
@@ -2229,6 +2289,15 @@ static int dsa_slave_switchdev_event(str
 		if (!fdb_info->added_by_user &&
 		    !dp->ds->assisted_learning_on_cpu_port)
 			return NOTIFY_DONE;
+
+		/* When the bridge learns an address on an offloaded
+		 * LAG we don't want to send traffic to the CPU, the
+		 * other ports bridged with the LAG should be able to
+		 * autonomously forward towards it.
+		 */
+		if (dsa_tree_offloads_netdev(dp->ds->dst, dev))
+			return NOTIFY_DONE;
+
 	}
 
 	if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -193,6 +193,47 @@ static int dsa_switch_fdb_del(struct dsa
 	return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
 }
 
+static int dsa_switch_lag_change(struct dsa_switch *ds,
+				 struct dsa_notifier_lag_info *info)
+{
+	if (ds->index == info->sw_index && ds->ops->port_lag_change)
+		return ds->ops->port_lag_change(ds, info->port);
+
+	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
+		return ds->ops->crosschip_lag_change(ds, info->sw_index,
+						     info->port);
+
+	return 0;
+}
+
+static int dsa_switch_lag_join(struct dsa_switch *ds,
+			       struct dsa_notifier_lag_info *info)
+{
+	if (ds->index == info->sw_index && ds->ops->port_lag_join)
+		return ds->ops->port_lag_join(ds, info->port, info->lag,
+					      info->info);
+
+	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
+		return ds->ops->crosschip_lag_join(ds, info->sw_index,
+						   info->port, info->lag,
+						   info->info);
+
+	return -EOPNOTSUPP;
+}
+
+static int dsa_switch_lag_leave(struct dsa_switch *ds,
+				struct dsa_notifier_lag_info *info)
+{
+	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
+		return ds->ops->port_lag_leave(ds, info->port, info->lag);
+
+	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
+		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
+						    info->port, info->lag);
+
+	return -EOPNOTSUPP;
+}
+
 static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
 				 struct dsa_notifier_mdb_info *info)
 {
@@ -340,6 +381,15 @@ static int dsa_switch_event(struct notif
 	case DSA_NOTIFIER_FDB_DEL:
 		err = dsa_switch_fdb_del(ds, info);
 		break;
+	case DSA_NOTIFIER_LAG_CHANGE:
+		err = dsa_switch_lag_change(ds, info);
+		break;
+	case DSA_NOTIFIER_LAG_JOIN:
+		err = dsa_switch_lag_join(ds, info);
+		break;
+	case DSA_NOTIFIER_LAG_LEAVE:
+		err = dsa_switch_lag_leave(ds, info);
+		break;
 	case DSA_NOTIFIER_MDB_ADD:
 		err = dsa_switch_mdb_add(ds, info);
 		break;
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -82,7 +82,19 @@ static struct sk_buff *dsa_rcv(struct sk
 	source_device = dsa_header[0] & 0x1f;
 	source_port = (dsa_header[1] >> 3) & 0x1f;
 
-	skb->dev = dsa_master_find_slave(dev, source_device, source_port);
+	if (trunk) {
+		struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+		/* The exact source port is not available in the tag,
+		 * so we inject the frame directly on the upper
+		 * team/bond.
+		 */
+		skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
+	} else {
+		skb->dev = dsa_master_find_slave(dev, source_device,
+						 source_port);
+	}
+
 	if (!skb->dev)
 		return NULL;
 