kernel: bump 5.10 to 5.10.106
[openwrt/staging/dedeckeh.git] / target / linux / realtek / patches-5.10 / 709-lag-offloading.patch
1 --- a/drivers/net/bonding/bond_main.c
2 +++ b/drivers/net/bonding/bond_main.c
3 @@ -2046,6 +2046,8 @@ int bond_enslave(struct net_device *bond
4 goto err_unregister;
5 }
6
7 + bond_lower_state_changed(new_slave);
8 +
9 res = bond_sysfs_slave_add(new_slave);
10 if (res) {
11 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
12 --- a/include/net/dsa.h
13 +++ b/include/net/dsa.h
14 @@ -149,8 +149,41 @@ struct dsa_switch_tree {
15
16 /* List of DSA links composing the routing table */
17 struct list_head rtable;
18 +
19 + /* Maps offloaded LAG netdevs to a zero-based linear ID for
20 + * drivers that need it.
21 + */
22 + struct net_device **lags;
23 + unsigned int lags_len;
24 };
25
26 +#define dsa_lags_foreach_id(_id, _dst) \
27 + for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++) \
28 + if ((_dst)->lags[(_id)])
29 +
30 +#define dsa_lag_foreach_port(_dp, _dst, _lag) \
31 + list_for_each_entry((_dp), &(_dst)->ports, list) \
32 + if ((_dp)->lag_dev == (_lag))
33 +
34 +static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
35 + unsigned int id)
36 +{
37 + return dst->lags[id];
38 +}
39 +
40 +static inline int dsa_lag_id(struct dsa_switch_tree *dst,
41 + struct net_device *lag)
42 +{
43 + unsigned int id;
44 +
45 + dsa_lags_foreach_id(id, dst) {
46 + if (dsa_lag_dev(dst, id) == lag)
47 + return id;
48 + }
49 +
50 + return -ENODEV;
51 +}
52 +
53 /* TC matchall action types */
54 enum dsa_port_mall_action_type {
55 DSA_PORT_MALL_MIRROR,
56 @@ -220,6 +253,8 @@ struct dsa_port {
57 bool devlink_port_setup;
58 struct phylink *pl;
59 struct phylink_config pl_config;
60 + struct net_device *lag_dev;
61 + bool lag_tx_enabled;
62
63 struct list_head list;
64
65 @@ -340,6 +375,14 @@ struct dsa_switch {
66 */
67 bool mtu_enforcement_ingress;
68
69 + /* Drivers that benefit from having an ID associated with each
70 + * offloaded LAG should set this to the maximum number of
71 + * supported IDs. DSA will then maintain a mapping of _at
41 + * least_ this many IDs, accessible to drivers via
73 + * dsa_lag_id().
74 + */
75 + unsigned int num_lag_ids;
76 +
77 size_t num_ports;
78 };
79
80 @@ -432,6 +475,18 @@ static inline bool dsa_port_is_vlan_filt
81 return dp->vlan_filtering;
82 }
83
84 +static inline
85 +struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
86 +{
87 + if (!dp->bridge_dev)
88 + return NULL;
89 +
90 + if (dp->lag_dev)
91 + return dp->lag_dev;
92 +
93 + return dp->slave;
94 +}
95 +
96 typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
97 bool is_static, void *data);
98 struct dsa_switch_ops {
99 @@ -629,6 +684,13 @@ struct dsa_switch_ops {
100 void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
101 int sw_index, int port,
102 struct net_device *br);
103 + int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
104 + int port);
105 + int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
106 + int port, struct net_device *lag,
107 + struct netdev_lag_upper_info *info);
108 + int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
109 + int port, struct net_device *lag);
110
111 /*
112 * PTP functionality
113 @@ -660,6 +722,16 @@ struct dsa_switch_ops {
114 int (*port_change_mtu)(struct dsa_switch *ds, int port,
115 int new_mtu);
116 int (*port_max_mtu)(struct dsa_switch *ds, int port);
117 +
118 + /*
119 + * LAG integration
120 + */
121 + int (*port_lag_change)(struct dsa_switch *ds, int port);
122 + int (*port_lag_join)(struct dsa_switch *ds, int port,
123 + struct net_device *lag,
124 + struct netdev_lag_upper_info *info);
125 + int (*port_lag_leave)(struct dsa_switch *ds, int port,
126 + struct net_device *lag);
127 };
128
129 #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
130 --- a/net/dsa/dsa.c
131 +++ b/net/dsa/dsa.c
132 @@ -220,11 +220,21 @@ static int dsa_switch_rcv(struct sk_buff
133 }
134
135 skb = nskb;
136 - p = netdev_priv(skb->dev);
137 skb_push(skb, ETH_HLEN);
138 skb->pkt_type = PACKET_HOST;
139 skb->protocol = eth_type_trans(skb, skb->dev);
140
141 + if (unlikely(!dsa_slave_dev_check(skb->dev))) {
142 + /* Packet is to be injected directly on an upper
143 + * device, e.g. a team/bond, so skip all DSA-port
144 + * specific actions.
145 + */
146 + netif_rx(skb);
147 + return 0;
148 + }
149 +
150 + p = netdev_priv(skb->dev);
151 +
152 if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
153 nskb = dsa_untag_bridge_pvid(skb);
154 if (!nskb) {
155 --- a/net/dsa/dsa2.c
156 +++ b/net/dsa/dsa2.c
157 @@ -21,6 +21,65 @@
158 static DEFINE_MUTEX(dsa2_mutex);
159 LIST_HEAD(dsa_tree_list);
160
161 +/**
162 + * dsa_lag_map() - Map LAG netdev to a linear LAG ID
163 + * @dst: Tree in which to record the mapping.
164 + * @lag: Netdev that is to be mapped to an ID.
165 + *
166 + * dsa_lag_id/dsa_lag_dev can then be used to translate between the
167 + * two spaces. The size of the mapping space is determined by the
168 + * driver by setting ds->num_lag_ids. It is perfectly legal to leave
169 + * it unset if it is not needed, in which case these functions become
170 + * no-ops.
171 + */
172 +void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
173 +{
174 + unsigned int id;
175 +
176 + if (dsa_lag_id(dst, lag) >= 0)
177 + /* Already mapped */
178 + return;
179 +
180 + for (id = 0; id < dst->lags_len; id++) {
181 + if (!dsa_lag_dev(dst, id)) {
182 + dst->lags[id] = lag;
183 + return;
184 + }
185 + }
186 +
187 + /* No IDs left, which is OK. Some drivers do not need it. The
188 + * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
189 + * returns an error for this device when joining the LAG. The
190 + * driver can then return -EOPNOTSUPP back to DSA, which will
191 + * fall back to a software LAG.
192 + */
193 +}
194 +
195 +/**
196 + * dsa_lag_unmap() - Remove a LAG ID mapping
197 + * @dst: Tree in which the mapping is recorded.
198 + * @lag: Netdev that was mapped.
199 + *
200 + * As there may be multiple users of the mapping, it is only removed
201 + * if there are no other references to it.
202 + */
203 +void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
204 +{
205 + struct dsa_port *dp;
206 + unsigned int id;
207 +
208 + dsa_lag_foreach_port(dp, dst, lag)
209 + /* There are remaining users of this mapping */
210 + return;
211 +
212 + dsa_lags_foreach_id(id, dst) {
213 + if (dsa_lag_dev(dst, id) == lag) {
214 + dst->lags[id] = NULL;
215 + break;
216 + }
217 + }
218 +}
219 +
220 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
221 {
222 struct dsa_switch_tree *dst;
223 @@ -597,6 +656,32 @@ static void dsa_tree_teardown_master(str
224 dsa_master_teardown(dp->master);
225 }
226
227 +static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
228 +{
229 + unsigned int len = 0;
230 + struct dsa_port *dp;
231 +
232 + list_for_each_entry(dp, &dst->ports, list) {
233 + if (dp->ds->num_lag_ids > len)
234 + len = dp->ds->num_lag_ids;
235 + }
236 +
237 + if (!len)
238 + return 0;
239 +
240 + dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
241 + if (!dst->lags)
242 + return -ENOMEM;
243 +
244 + dst->lags_len = len;
245 + return 0;
246 +}
247 +
248 +static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
249 +{
250 + kfree(dst->lags);
251 +}
252 +
253 static int dsa_tree_setup(struct dsa_switch_tree *dst)
254 {
255 bool complete;
256 @@ -624,12 +709,18 @@ static int dsa_tree_setup(struct dsa_swi
257 if (err)
258 goto teardown_switches;
259
260 + err = dsa_tree_setup_lags(dst);
261 + if (err)
262 + goto teardown_master;
263 +
264 dst->setup = true;
265
266 pr_info("DSA: tree %d setup\n", dst->index);
267
268 return 0;
269
270 +teardown_master:
271 + dsa_tree_teardown_master(dst);
272 teardown_switches:
273 dsa_tree_teardown_switches(dst);
274 teardown_default_cpu:
275 @@ -645,6 +736,8 @@ static void dsa_tree_teardown(struct dsa
276 if (!dst->setup)
277 return;
278
279 + dsa_tree_teardown_lags(dst);
280 +
281 dsa_tree_teardown_master(dst);
282
283 dsa_tree_teardown_switches(dst);
284 --- a/net/dsa/dsa_priv.h
285 +++ b/net/dsa/dsa_priv.h
286 @@ -20,6 +20,9 @@ enum {
287 DSA_NOTIFIER_BRIDGE_LEAVE,
288 DSA_NOTIFIER_FDB_ADD,
289 DSA_NOTIFIER_FDB_DEL,
290 + DSA_NOTIFIER_LAG_CHANGE,
291 + DSA_NOTIFIER_LAG_JOIN,
292 + DSA_NOTIFIER_LAG_LEAVE,
293 DSA_NOTIFIER_MDB_ADD,
294 DSA_NOTIFIER_MDB_DEL,
295 DSA_NOTIFIER_VLAN_ADD,
296 @@ -57,6 +60,15 @@ struct dsa_notifier_mdb_info {
297 int port;
298 };
299
300 +/* DSA_NOTIFIER_LAG_* */
301 +struct dsa_notifier_lag_info {
302 + struct net_device *lag;
303 + int sw_index;
304 + int port;
305 +
306 + struct netdev_lag_upper_info *info;
307 +};
308 +
309 /* DSA_NOTIFIER_VLAN_* */
310 struct dsa_notifier_vlan_info {
311 const struct switchdev_obj_port_vlan *vlan;
312 @@ -149,6 +161,11 @@ void dsa_port_disable_rt(struct dsa_port
313 void dsa_port_disable(struct dsa_port *dp);
314 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
315 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
316 +int dsa_port_lag_change(struct dsa_port *dp,
317 + struct netdev_lag_lower_state_info *linfo);
318 +int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
319 + struct netdev_lag_upper_info *uinfo);
320 +void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
321 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
322 struct switchdev_trans *trans);
323 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
324 @@ -181,6 +198,71 @@ int dsa_port_link_register_of(struct dsa
325 void dsa_port_link_unregister_of(struct dsa_port *dp);
326 extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
327
328 +static inline bool dsa_port_offloads_netdev(struct dsa_port *dp,
329 + struct net_device *dev)
330 +{
331 + /* Switchdev offloading can be configured on: */
332 +
333 + if (dev == dp->slave)
334 + /* DSA ports directly connected to a bridge, and event
335 + * was emitted for the ports themselves.
336 + */
337 + return true;
338 +
339 + if (dp->bridge_dev == dev)
340 + /* DSA ports connected to a bridge, and event was emitted
341 + * for the bridge.
342 + */
343 + return true;
344 +
345 + if (dp->lag_dev == dev)
346 + /* DSA ports connected to a bridge via a LAG */
347 + return true;
348 +
349 + return false;
350 +}
351 +
352 +static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
353 + struct net_device *dev)
354 +{
355 + return dsa_port_to_bridge_port(dp) == dev;
356 +}
357 +
358 +static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
359 + struct net_device *bridge_dev)
360 +{
361 + /* DSA ports connected to a bridge, and event was emitted
362 + * for the bridge.
363 + */
364 + return dp->bridge_dev == bridge_dev;
365 +}
366 +
367 +/* Returns true if any port of this tree offloads the given net_device */
368 +static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
369 + struct net_device *dev)
370 +{
371 + struct dsa_port *dp;
372 +
373 + list_for_each_entry(dp, &dst->ports, list)
374 + if (dsa_port_offloads_bridge_port(dp, dev))
375 + return true;
376 +
377 + return false;
378 +}
379 +
380 +/* Returns true if any port of this tree offloads the given net_device */
381 +static inline bool dsa_tree_offloads_netdev(struct dsa_switch_tree *dst,
382 + struct net_device *dev)
383 +{
384 + struct dsa_port *dp;
385 +
386 + list_for_each_entry(dp, &dst->ports, list)
387 + if (dsa_port_offloads_netdev(dp, dev))
388 + return true;
389 +
390 + return false;
391 +}
392 +
393 /* slave.c */
394 extern const struct dsa_device_ops notag_netdev_ops;
395 void dsa_slave_mii_bus_init(struct dsa_switch *ds);
396 @@ -285,6 +367,9 @@ int dsa_switch_register_notifier(struct
397 void dsa_switch_unregister_notifier(struct dsa_switch *ds);
398
399 /* dsa2.c */
400 +void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
401 +void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
402 +
403 extern struct list_head dsa_tree_list;
404
405 #endif
406 --- a/net/dsa/port.c
407 +++ b/net/dsa/port.c
408 @@ -193,6 +193,99 @@ void dsa_port_bridge_leave(struct dsa_po
409 dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
410 }
411
412 +int dsa_port_lag_change(struct dsa_port *dp,
413 + struct netdev_lag_lower_state_info *linfo)
414 +{
415 + struct dsa_notifier_lag_info info = {
416 + .sw_index = dp->ds->index,
417 + .port = dp->index,
418 + };
419 + bool tx_enabled;
420 +
421 + if (!dp->lag_dev)
422 + return 0;
423 +
424 + /* On statically configured aggregates (e.g. loadbalance
425 + * without LACP) ports will always be tx_enabled, even if the
426 + * link is down. Thus we require both link_up and tx_enabled
427 + * in order to include it in the tx set.
428 + */
429 + tx_enabled = linfo->link_up && linfo->tx_enabled;
430 +
431 + if (tx_enabled == dp->lag_tx_enabled)
432 + return 0;
433 +
434 + dp->lag_tx_enabled = tx_enabled;
435 +
436 + return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
437 +}
438 +
439 +int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
440 + struct netdev_lag_upper_info *uinfo)
441 +{
442 + struct dsa_notifier_lag_info info = {
443 + .sw_index = dp->ds->index,
444 + .port = dp->index,
445 + .lag = lag,
446 + .info = uinfo,
447 + };
448 + struct net_device *bridge_dev;
449 + int err;
450 +
451 + dsa_lag_map(dp->ds->dst, lag);
452 + dp->lag_dev = lag;
453 +
454 + err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
455 + if (err)
456 + goto err_lag_join;
457 +
458 + bridge_dev = netdev_master_upper_dev_get(lag);
459 + if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
460 + return 0;
461 +
462 + err = dsa_port_bridge_join(dp, bridge_dev);
463 + if (err)
464 + goto err_bridge_join;
465 +
466 + return 0;
467 +
468 +err_bridge_join:
469 + dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
470 +err_lag_join:
471 + dp->lag_dev = NULL;
472 + dsa_lag_unmap(dp->ds->dst, lag);
473 + return err;
474 +}
475 +
476 +void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
477 +{
478 + struct dsa_notifier_lag_info info = {
479 + .sw_index = dp->ds->index,
480 + .port = dp->index,
481 + .lag = lag,
482 + };
483 + int err;
484 +
485 + if (!dp->lag_dev)
486 + return;
487 +
488 + /* Port might have been part of a LAG that in turn was
489 + * attached to a bridge.
490 + */
491 + if (dp->bridge_dev)
492 + dsa_port_bridge_leave(dp, dp->bridge_dev);
493 +
494 + dp->lag_tx_enabled = false;
495 + dp->lag_dev = NULL;
496 +
497 + err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
498 + if (err)
499 + pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
500 + err);
501 +
502 + dsa_lag_unmap(dp->ds->dst, lag);
503 +}
504 +
505 /* Must be called under rcu_read_lock() */
506 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
507 bool vlan_filtering)
508 --- a/net/dsa/slave.c
509 +++ b/net/dsa/slave.c
510 @@ -337,9 +337,6 @@ static int dsa_slave_vlan_add(struct net
511 struct switchdev_obj_port_vlan vlan;
512 int vid, err;
513
514 - if (obj->orig_dev != dev)
515 - return -EOPNOTSUPP;
516 -
517 if (dsa_port_skip_vlan_configuration(dp))
518 return 0;
519
520 @@ -394,11 +391,13 @@ static int dsa_slave_port_obj_add(struct
521
522 switch (obj->id) {
523 case SWITCHDEV_OBJ_ID_PORT_MDB:
524 - if (obj->orig_dev != dev)
525 + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
526 return -EOPNOTSUPP;
527 err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
528 break;
529 case SWITCHDEV_OBJ_ID_HOST_MDB:
530 + if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
531 + return -EOPNOTSUPP;
532 /* DSA can directly translate this to a normal MDB add,
533 * but on the CPU port.
534 */
535 @@ -406,6 +405,9 @@ static int dsa_slave_port_obj_add(struct
536 trans);
537 break;
538 case SWITCHDEV_OBJ_ID_PORT_VLAN:
539 + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
540 + return -EOPNOTSUPP;
541 +
542 err = dsa_slave_vlan_add(dev, obj, trans);
543 break;
544 default:
545 @@ -424,9 +426,6 @@ static int dsa_slave_vlan_del(struct net
546 struct switchdev_obj_port_vlan *vlan;
547 int vid, err;
548
549 - if (obj->orig_dev != dev)
550 - return -EOPNOTSUPP;
551 -
552 if (dsa_port_skip_vlan_configuration(dp))
553 return 0;
554
555 @@ -453,17 +452,22 @@ static int dsa_slave_port_obj_del(struct
556
557 switch (obj->id) {
558 case SWITCHDEV_OBJ_ID_PORT_MDB:
559 - if (obj->orig_dev != dev)
560 + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
561 return -EOPNOTSUPP;
562 err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
563 break;
564 case SWITCHDEV_OBJ_ID_HOST_MDB:
565 + if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
566 + return -EOPNOTSUPP;
567 /* DSA can directly translate this to a normal MDB add,
568 * but on the CPU port.
569 */
570 err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
571 break;
572 case SWITCHDEV_OBJ_ID_PORT_VLAN:
573 + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
574 + return -EOPNOTSUPP;
575 +
576 err = dsa_slave_vlan_del(dev, obj);
577 break;
578 default:
579 @@ -1993,6 +1997,46 @@ static int dsa_slave_changeupper(struct
580 dsa_port_bridge_leave(dp, info->upper_dev);
581 err = NOTIFY_OK;
582 }
583 + } else if (netif_is_lag_master(info->upper_dev)) {
584 + if (info->linking) {
585 + err = dsa_port_lag_join(dp, info->upper_dev,
586 + info->upper_info);
587 + if (err == -EOPNOTSUPP) {
588 + NL_SET_ERR_MSG_MOD(info->info.extack,
589 + "Offloading not supported");
590 + err = 0;
591 + }
592 + err = notifier_from_errno(err);
593 + } else {
594 + dsa_port_lag_leave(dp, info->upper_dev);
595 + err = NOTIFY_OK;
596 + }
597 + }
598 +
599 + return err;
600 +}
601 +
602 +static int
603 +dsa_slave_lag_changeupper(struct net_device *dev,
604 + struct netdev_notifier_changeupper_info *info)
605 +{
606 + struct net_device *lower;
607 + struct list_head *iter;
608 + int err = NOTIFY_DONE;
609 + struct dsa_port *dp;
610 +
611 + netdev_for_each_lower_dev(dev, lower, iter) {
612 + if (!dsa_slave_dev_check(lower))
613 + continue;
614 +
615 + dp = dsa_slave_to_port(lower);
616 + if (!dp->lag_dev)
617 + /* Software LAG */
618 + continue;
619 +
620 + err = dsa_slave_changeupper(lower, info);
621 + if (notifier_to_errno(err))
622 + break;
623 }
624
625 return err;
626 @@ -2078,10 +2122,26 @@ static int dsa_slave_netdevice_event(str
627 break;
628 }
629 case NETDEV_CHANGEUPPER:
630 + if (dsa_slave_dev_check(dev))
631 + return dsa_slave_changeupper(dev, ptr);
632 +
633 + if (netif_is_lag_master(dev))
634 + return dsa_slave_lag_changeupper(dev, ptr);
635 +
636 + break;
637 + case NETDEV_CHANGELOWERSTATE: {
638 + struct netdev_notifier_changelowerstate_info *info = ptr;
639 + struct dsa_port *dp;
640 + int err;
641 +
642 if (!dsa_slave_dev_check(dev))
643 - return NOTIFY_DONE;
644 + break;
645
646 - return dsa_slave_changeupper(dev, ptr);
647 + dp = dsa_slave_to_port(dev);
648 +
649 + err = dsa_port_lag_change(dp, info->lower_state_info);
650 + return notifier_from_errno(err);
651 + }
652 }
653
654 return NOTIFY_DONE;
655 @@ -2229,6 +2289,15 @@ static int dsa_slave_switchdev_event(str
656 if (!fdb_info->added_by_user &&
657 !dp->ds->assisted_learning_on_cpu_port)
658 return NOTIFY_DONE;
659 +
660 + /* When the bridge learns an address on an offloaded
661 + * LAG we don't want to send traffic to the CPU, the
662 + * other ports bridged with the LAG should be able to
663 + * autonomously forward towards it.
664 + */
665 + if (dsa_tree_offloads_netdev(dp->ds->dst, dev))
666 + return NOTIFY_DONE;
667 +
668 }
669
670 if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
671 --- a/net/dsa/switch.c
672 +++ b/net/dsa/switch.c
673 @@ -193,6 +193,47 @@ static int dsa_switch_fdb_del(struct dsa
674 return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
675 }
676
677 +static int dsa_switch_lag_change(struct dsa_switch *ds,
678 + struct dsa_notifier_lag_info *info)
679 +{
680 + if (ds->index == info->sw_index && ds->ops->port_lag_change)
681 + return ds->ops->port_lag_change(ds, info->port);
682 +
683 + if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
684 + return ds->ops->crosschip_lag_change(ds, info->sw_index,
685 + info->port);
686 +
687 + return 0;
688 +}
689 +
690 +static int dsa_switch_lag_join(struct dsa_switch *ds,
691 + struct dsa_notifier_lag_info *info)
692 +{
693 + if (ds->index == info->sw_index && ds->ops->port_lag_join)
694 + return ds->ops->port_lag_join(ds, info->port, info->lag,
695 + info->info);
696 +
697 + if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
698 + return ds->ops->crosschip_lag_join(ds, info->sw_index,
699 + info->port, info->lag,
700 + info->info);
701 +
702 + return -EOPNOTSUPP;
703 +}
704 +
705 +static int dsa_switch_lag_leave(struct dsa_switch *ds,
706 + struct dsa_notifier_lag_info *info)
707 +{
708 + if (ds->index == info->sw_index && ds->ops->port_lag_leave)
709 + return ds->ops->port_lag_leave(ds, info->port, info->lag);
710 +
711 + if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
712 + return ds->ops->crosschip_lag_leave(ds, info->sw_index,
713 + info->port, info->lag);
714 +
715 + return -EOPNOTSUPP;
716 +}
717 +
718 static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
719 struct dsa_notifier_mdb_info *info)
720 {
721 @@ -340,6 +381,15 @@ static int dsa_switch_event(struct notif
722 case DSA_NOTIFIER_FDB_DEL:
723 err = dsa_switch_fdb_del(ds, info);
724 break;
725 + case DSA_NOTIFIER_LAG_CHANGE:
726 + err = dsa_switch_lag_change(ds, info);
727 + break;
728 + case DSA_NOTIFIER_LAG_JOIN:
729 + err = dsa_switch_lag_join(ds, info);
730 + break;
731 + case DSA_NOTIFIER_LAG_LEAVE:
732 + err = dsa_switch_lag_leave(ds, info);
733 + break;
734 case DSA_NOTIFIER_MDB_ADD:
735 err = dsa_switch_mdb_add(ds, info);
736 break;
737 --- a/net/dsa/tag_dsa.c
738 +++ b/net/dsa/tag_dsa.c
739 @@ -82,7 +82,19 @@ static struct sk_buff *dsa_rcv(struct sk
740 source_device = dsa_header[0] & 0x1f;
741 source_port = (dsa_header[1] >> 3) & 0x1f;
742
743 - skb->dev = dsa_master_find_slave(dev, source_device, source_port);
744 + if (trunk) {
745 + struct dsa_port *cpu_dp = dev->dsa_ptr;
746 +
747 + /* The exact source port is not available in the tag,
748 + * so we inject the frame directly on the upper
749 + * team/bond.
750 + */
751 + skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
752 + } else {
753 + skb->dev = dsa_master_find_slave(dev, source_device,
754 + source_port);
755 + }
756 +
757 if (!skb->dev)
758 return NULL;
759