target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c
1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #include <linux/of_mdio.h>
4 #include <linux/of_platform.h>
5 #include <net/arp.h>
6 #include <net/nexthop.h>
7 #include <net/neighbour.h>
8 #include <net/netevent.h>
9 #include <linux/inetdevice.h>
10 #include <linux/rhashtable.h>
11 #include <linux/of_net.h>
12 #include <asm/mach-rtl838x/mach-rtl83xx.h>
13
14 #include "rtl83xx.h"
15
16 extern struct rtl83xx_soc_info soc_info;
17
18 extern const struct rtl838x_reg rtl838x_reg;
19 extern const struct rtl838x_reg rtl839x_reg;
20 extern const struct rtl838x_reg rtl930x_reg;
21 extern const struct rtl838x_reg rtl931x_reg;
22
23 extern const struct dsa_switch_ops rtl83xx_switch_ops;
24 extern const struct dsa_switch_ops rtl930x_switch_ops;
25
26 DEFINE_MUTEX(smi_lock);
27
28 int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port)
29 {
30 u32 msti = 0;
31 u32 port_state[4];
32 int index, bit;
33 int pos = port;
34 int n = priv->port_width << 1;
35
	/* Ports at and above the CPU port can never be configured */
37 if (port >= priv->cpu_port)
38 return -1;
39
40 mutex_lock(&priv->reg_mutex);
41
42 /* For the RTL839x and following, the bits are left-aligned in the 64/128 bit field */
43 if (priv->family_id == RTL8390_FAMILY_ID)
44 pos += 12;
45 if (priv->family_id == RTL9300_FAMILY_ID)
46 pos += 3;
47 if (priv->family_id == RTL9310_FAMILY_ID)
48 pos += 8;
49
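	/* Each port occupies 2 bits of STP state, so 16 ports fit into one 32-bit
	 * word; the word order in port_state[] is reversed relative to the port
	 * number, hence the index calculation below.
	 */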
50 index = n - (pos >> 4) - 1;
51 bit = (pos << 1) % 32;
52
53 priv->r->stp_get(priv, msti, port_state);
54
55 mutex_unlock(&priv->reg_mutex);
56
57 return (port_state[index] >> bit) & 3;
58 }
59
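/* The TBL_DESC() entries below are assumed to describe, in order: the control
 * register address, the data register base, the number of data registers, the
 * command/execute bit position, the table-type bit position and the inverted
 * read/write-mode flag used by rtl_table_exec() and rtl_table_data().
 */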
60 static struct table_reg rtl838x_tbl_regs[] = {
61 TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1), /* RTL8380_TBL_L2 */
62 TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1), /* RTL8380_TBL_0 */
63 TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1), /* RTL8380_TBL_1 */
64
65 TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0), /* RTL8390_TBL_L2 */
66 TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0), /* RTL8390_TBL_0 */
67 TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0), /* RTL8390_TBL_1 */
68 TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0), /* RTL8390_TBL_2 */
69
70 TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0), /* RTL9300_TBL_L2 */
71 TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0), /* RTL9300_TBL_0 */
72 TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0), /* RTL9300_TBL_1 */
73 TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0), /* RTL9300_TBL_2 */
74 TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0), /* RTL9300_TBL_HSB */
75 TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0), /* RTL9300_TBL_HSA */
76
77 TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0), /* RTL9310_TBL_0 */
78 TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0), /* RTL9310_TBL_1 */
79 TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0), /* RTL9310_TBL_2 */
80 TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0), /* RTL9310_TBL_3 */
81 TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0), /* RTL9310_TBL_4 */
82 TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0), /* RTL9310_TBL_5 */
83 };
84
85 void rtl_table_init(void)
86 {
87 for (int i = 0; i < RTL_TBL_END; i++)
88 mutex_init(&rtl838x_tbl_regs[i].lock);
89 }
90
91 /* Request access to table t in table access register r
92 * Returns a handle to a lock for that table
93 */
94 struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t)
95 {
96 if (r >= RTL_TBL_END)
97 return NULL;
98
99 if (t >= BIT(rtl838x_tbl_regs[r].c_bit-rtl838x_tbl_regs[r].t_bit))
100 return NULL;
101
102 mutex_lock(&rtl838x_tbl_regs[r].lock);
103 rtl838x_tbl_regs[r].tbl = t;
104
105 return &rtl838x_tbl_regs[r];
106 }
107
108 /* Release a table r, unlock the corresponding lock */
109 void rtl_table_release(struct table_reg *r)
110 {
111 if (!r)
112 return;
113
114 /* pr_info("Unlocking %08x\n", (u32)r); */
115 mutex_unlock(&r->lock);
116 /* pr_info("Unlock done\n"); */
117 }
118
119 static int rtl_table_exec(struct table_reg *r, bool is_write, int idx)
120 {
121 int ret = 0;
122 u32 cmd, val;
123
124 /* Read/write bit has inverted meaning on RTL838x */
125 if (r->rmode)
126 cmd = is_write ? 0 : BIT(r->c_bit);
127 else
128 cmd = is_write ? BIT(r->c_bit) : 0;
129
130 cmd |= BIT(r->c_bit + 1); /* Execute bit */
131 cmd |= r->tbl << r->t_bit; /* Table type */
132 cmd |= idx & (BIT(r->t_bit) - 1); /* Index */
133
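	/* Writing the command starts the access; the hardware clears the execute
	 * bit once the operation has finished, which the poll below waits for.
	 */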
134 sw_w32(cmd, r->addr);
135
136 ret = readx_poll_timeout(sw_r32, r->addr, val,
137 !(val & BIT(r->c_bit + 1)), 20, 10000);
138 if (ret)
139 pr_err("%s: timeout\n", __func__);
140
141 return ret;
142 }
143
144 /* Reads table index idx into the data registers of the table */
145 int rtl_table_read(struct table_reg *r, int idx)
146 {
147 return rtl_table_exec(r, false, idx);
148 }
149
150 /* Writes the content of the table data registers into the table at index idx */
151 int rtl_table_write(struct table_reg *r, int idx)
152 {
153 return rtl_table_exec(r, true, idx);
154 }
155
156 /* Returns the address of the ith data register of table register r
157 * the address is relative to the beginning of the Switch-IO block at 0xbb000000
158 */
159 inline u16 rtl_table_data(struct table_reg *r, int i)
160 {
161 if (i >= r->max_data)
162 i = r->max_data - 1;
163 return r->data + i * 4;
164 }
165
166 inline u32 rtl_table_data_r(struct table_reg *r, int i)
167 {
168 return sw_r32(rtl_table_data(r, i));
169 }
170
171 inline void rtl_table_data_w(struct table_reg *r, u32 v, int i)
172 {
173 sw_w32(v, rtl_table_data(r, i));
174 }
175
176 /* Port register accessor functions for the RTL838x and RTL930X SoCs */
177 void rtl838x_mask_port_reg(u64 clear, u64 set, int reg)
178 {
179 sw_w32_mask((u32)clear, (u32)set, reg);
180 }
181
182 void rtl838x_set_port_reg(u64 set, int reg)
183 {
184 sw_w32((u32)set, reg);
185 }
186
187 u64 rtl838x_get_port_reg(int reg)
188 {
189 return ((u64)sw_r32(reg));
190 }
191
192 /* Port register accessor functions for the RTL839x and RTL931X SoCs */
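/* Port masks are wider than 32 bit on these SoCs and are split across two
 * consecutive registers; the _be variants keep the upper half at the lower
 * address, while the _le variants keep the lower half there.
 */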
193 void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg)
194 {
195 sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg);
196 sw_w32_mask((u32)(clear & 0xffffffff), (u32)(set & 0xffffffff), reg + 4);
197 }
198
199 u64 rtl839x_get_port_reg_be(int reg)
200 {
201 u64 v = sw_r32(reg);
202
203 v <<= 32;
204 v |= sw_r32(reg + 4);
205
206 return v;
207 }
208
209 void rtl839x_set_port_reg_be(u64 set, int reg)
210 {
211 sw_w32(set >> 32, reg);
212 sw_w32(set & 0xffffffff, reg + 4);
213 }
214
215 void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg)
216 {
217 sw_w32_mask((u32)clear, (u32)set, reg);
218 sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg + 4);
219 }
220
221 void rtl839x_set_port_reg_le(u64 set, int reg)
222 {
223 sw_w32(set, reg);
224 sw_w32(set >> 32, reg + 4);
225 }
226
227 u64 rtl839x_get_port_reg_le(int reg)
228 {
229 u64 v = sw_r32(reg + 4);
230
231 v <<= 32;
232 v |= sw_r32(reg);
233
234 return v;
235 }
236
237 int read_phy(u32 port, u32 page, u32 reg, u32 *val)
238 {
239 switch (soc_info.family) {
240 case RTL8380_FAMILY_ID:
241 return rtl838x_read_phy(port, page, reg, val);
242 case RTL8390_FAMILY_ID:
243 return rtl839x_read_phy(port, page, reg, val);
244 case RTL9300_FAMILY_ID:
245 return rtl930x_read_phy(port, page, reg, val);
246 case RTL9310_FAMILY_ID:
247 return rtl931x_read_phy(port, page, reg, val);
248 }
249
250 return -1;
251 }
252
253 int write_phy(u32 port, u32 page, u32 reg, u32 val)
254 {
255 switch (soc_info.family) {
256 case RTL8380_FAMILY_ID:
257 return rtl838x_write_phy(port, page, reg, val);
258 case RTL8390_FAMILY_ID:
259 return rtl839x_write_phy(port, page, reg, val);
260 case RTL9300_FAMILY_ID:
261 return rtl930x_write_phy(port, page, reg, val);
262 case RTL9310_FAMILY_ID:
263 return rtl931x_write_phy(port, page, reg, val);
264 }
265
266 return -1;
267 }
268
269 static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
270 {
271 struct device *dev = priv->dev;
272 struct device_node *dn, *phy_node, *mii_np = dev->of_node;
273 struct mii_bus *bus;
274 int ret;
275 u32 pn;
276
277 pr_debug("In %s\n", __func__);
278 mii_np = of_find_compatible_node(NULL, NULL, "realtek,rtl838x-mdio");
279 if (mii_np) {
280 pr_debug("Found compatible MDIO node!\n");
281 } else {
282 dev_err(priv->dev, "no %s child node found", "mdio-bus");
283 return -ENODEV;
284 }
285
286 priv->mii_bus = of_mdio_find_bus(mii_np);
287 if (!priv->mii_bus) {
288 pr_debug("Deferring probe of mdio bus\n");
289 return -EPROBE_DEFER;
290 }
	if (!of_device_is_available(mii_np)) {
		of_node_put(mii_np);
		return -ENODEV;
	}
293
294 bus = devm_mdiobus_alloc(priv->ds->dev);
295 if (!bus)
296 return -ENOMEM;
297
298 bus->name = "rtl838x slave mii";
299
300 /* Since the NIC driver is loaded first, we can use the mdio rw functions
301 * assigned there.
302 */
303 bus->read = priv->mii_bus->read;
304 bus->write = priv->mii_bus->write;
305 bus->read_paged = priv->mii_bus->read_paged;
306 bus->write_paged = priv->mii_bus->write_paged;
307 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", bus->name, dev->id);
308
309 bus->parent = dev;
310 priv->ds->slave_mii_bus = bus;
311 priv->ds->slave_mii_bus->priv = priv->mii_bus->priv;
312 priv->ds->slave_mii_bus->access_capabilities = priv->mii_bus->access_capabilities;
313
314 ret = mdiobus_register(priv->ds->slave_mii_bus);
	if (ret) {
		of_node_put(mii_np);
		return ret;
	}
319
320 dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
321 if (!dn) {
322 dev_err(priv->dev, "No RTL switch node in DTS\n");
323 return -ENODEV;
324 }
325
326 for_each_node_by_name(dn, "port") {
327 phy_interface_t interface;
328 u32 led_set;
329
330 if (!of_device_is_available(dn))
331 continue;
332
333 if (of_property_read_u32(dn, "reg", &pn))
334 continue;
335
336 phy_node = of_parse_phandle(dn, "phy-handle", 0);
337 if (!phy_node) {
338 if (pn != priv->cpu_port)
339 dev_err(priv->dev, "Port node %d misses phy-handle\n", pn);
340 continue;
341 }
342
343 if (of_property_read_u32(phy_node, "sds", &priv->ports[pn].sds_num))
344 priv->ports[pn].sds_num = -1;
345 pr_debug("%s port %d has SDS %d\n", __func__, pn, priv->ports[pn].sds_num);
346
347 if (of_get_phy_mode(dn, &interface))
348 interface = PHY_INTERFACE_MODE_NA;
349 if (interface == PHY_INTERFACE_MODE_HSGMII)
350 priv->ports[pn].is2G5 = true;
351 if (interface == PHY_INTERFACE_MODE_USXGMII)
352 priv->ports[pn].is2G5 = priv->ports[pn].is10G = true;
353 if (interface == PHY_INTERFACE_MODE_10GBASER)
354 priv->ports[pn].is10G = true;
355
356 if (of_property_read_u32(dn, "led-set", &led_set))
357 led_set = 0;
358 priv->ports[pn].led_set = led_set;
359
360 /* Check for the integrated SerDes of the RTL8380M first */
361 if (of_property_read_bool(phy_node, "phy-is-integrated")
362 && priv->id == 0x8380 && pn >= 24) {
363 pr_debug("----> FÓUND A SERDES\n");
364 priv->ports[pn].phy = PHY_RTL838X_SDS;
365 continue;
366 }
367
368 if (priv->id >= 0x9300) {
369 priv->ports[pn].phy_is_integrated = false;
370 if (of_property_read_bool(phy_node, "phy-is-integrated")) {
371 priv->ports[pn].phy_is_integrated = true;
372 priv->ports[pn].phy = PHY_RTL930X_SDS;
373 }
374 } else {
375 if (of_property_read_bool(phy_node, "phy-is-integrated") &&
376 !of_property_read_bool(phy_node, "sfp")) {
377 priv->ports[pn].phy = PHY_RTL8218B_INT;
378 continue;
379 }
380 }
381
382 if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
383 of_property_read_bool(phy_node, "sfp")) {
384 priv->ports[pn].phy = PHY_RTL8214FC;
385 continue;
386 }
387
388 if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
389 !of_property_read_bool(phy_node, "sfp")) {
390 priv->ports[pn].phy = PHY_RTL8218B_EXT;
391 continue;
392 }
393 }
394
395 /* Disable MAC polling the PHY so that we can start configuration */
396 priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl);
397
398 /* Enable PHY control via SoC */
399 if (priv->family_id == RTL8380_FAMILY_ID) {
400 /* Enable SerDes NWAY and PHY control via SoC */
401 sw_w32_mask(BIT(7), BIT(15), RTL838X_SMI_GLB_CTRL);
402 } else if (priv->family_id == RTL8390_FAMILY_ID) {
403 /* Disable PHY polling via SoC */
404 sw_w32_mask(BIT(7), 0, RTL839X_SMI_GLB_CTRL);
405 }
406
407 /* Power on fibre ports and reset them if necessary */
408 if (priv->ports[24].phy == PHY_RTL838X_SDS) {
409 pr_debug("Powering on fibre ports & reset\n");
410 rtl8380_sds_power(24, 1);
411 rtl8380_sds_power(26, 1);
412 }
413
414 pr_debug("%s done\n", __func__);
415
416 return 0;
417 }
418
419 static int __init rtl83xx_get_l2aging(struct rtl838x_switch_priv *priv)
420 {
421 int t = sw_r32(priv->r->l2_ctrl_1);
422
423 t &= priv->family_id == RTL8380_FAMILY_ID ? 0x7fffff : 0x1FFFFF;
424
425 if (priv->family_id == RTL8380_FAMILY_ID)
426 t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */
427 else
428 t = (t * 3) / 5;
429
430 pr_debug("L2 AGING time: %d sec\n", t);
431 pr_debug("Dynamic aging for ports: %x\n", sw_r32(priv->r->l2_port_aging_out));
432
433 return t;
434 }
435
436 /* Caller must hold priv->reg_mutex */
437 int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info)
438 {
439 struct rtl838x_switch_priv *priv = ds->priv;
440 int i;
441 u32 algomsk = 0;
442 u32 algoidx = 0;
443
444 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
445 pr_err("%s: Only mode LACP 802.3ad (4) allowed.\n", __func__);
446 return -EINVAL;
447 }
448
449 if (group >= priv->n_lags) {
450 pr_err("%s: LAG %d invalid.\n", __func__, group);
451 return -EINVAL;
452 }
453
454 if (port >= priv->cpu_port) {
455 pr_err("%s: Port %d invalid.\n", __func__, port);
456 return -EINVAL;
457 }
458
459 for (i = 0; i < priv->n_lags; i++) {
460 if (priv->lags_port_members[i] & BIT_ULL(port))
461 break;
462 }
463 if (i != priv->n_lags) {
464 pr_err("%s: Port %d already member of LAG %d.\n", __func__, port, i);
465 return -ENOSPC;
466 }
467
468 switch(info->hash_type) {
469 case NETDEV_LAG_HASH_L2:
470 algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
471 algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
472 break;
473 case NETDEV_LAG_HASH_L23:
474 algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
475 algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
476 algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; /* source ip */
477 algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; /* dest ip */
478 algoidx = 1;
479 break;
480 case NETDEV_LAG_HASH_L34:
481 algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; /* sport */
482 algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; /* dport */
483 algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; /* source ip */
484 algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; /* dest ip */
485 algoidx = 2;
486 break;
487 default:
488 algomsk |= 0x7f;
489 }
490 priv->r->set_distribution_algorithm(group, algoidx, algomsk);
491 priv->r->mask_port_reg_be(0, BIT_ULL(port), priv->r->trk_mbr_ctr(group));
492 priv->lags_port_members[group] |= BIT_ULL(port);
493
494 pr_info("%s: Added port %d to LAG %d. Members now %016llx.\n",
495 __func__, port, group, priv->lags_port_members[group]);
496
497 return 0;
498 }
499
500 /* Caller must hold priv->reg_mutex */
501 int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
502 {
503 struct rtl838x_switch_priv *priv = ds->priv;
504
505 if (group >= priv->n_lags) {
506 pr_err("%s: LAG %d invalid.\n", __func__, group);
507 return -EINVAL;
508 }
509
510 if (port >= priv->cpu_port) {
511 pr_err("%s: Port %d invalid.\n", __func__, port);
512 return -EINVAL;
513 }
514
515 if (!(priv->lags_port_members[group] & BIT_ULL(port))) {
516 pr_err("%s: Port %d not member of LAG %d.\n", __func__, port, group);
517 return -ENOSPC;
518 }
519
520 /* 0x7f algo mask all */
521 priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group));
522 priv->lags_port_members[group] &= ~BIT_ULL(port);
523
524 pr_info("%s: Removed port %d from LAG %d. Members now %016llx.\n",
525 __func__, port, group, priv->lags_port_members[group]);
526
527 return 0;
528 }
529
530 // Currently Unused
531 // /* Allocate a 64 bit octet counter located in the LOG HW table */
532 // static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv)
533 // {
534 // int idx;
535
536 // mutex_lock(&priv->reg_mutex);
537
538 // idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
539 // if (idx >= priv->n_counters) {
540 // mutex_unlock(&priv->reg_mutex);
541 // return -1;
542 // }
543
544 // set_bit(idx, priv->octet_cntr_use_bm);
545 // mutex_unlock(&priv->reg_mutex);
546
547 // return idx;
548 // }
549
/* Allocate a 32-bit packet counter.
 * Two 32-bit packet counters share the location of one 64-bit octet counter.
 * Initially no packet counters are free; two become available whenever the
 * corresponding octet counter is allocated.
 */
555 int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv)
556 {
557 int idx, j;
558
559 mutex_lock(&priv->reg_mutex);
560
561 /* Because initially no packet counters are free, the logic is reversed:
562 * a 0-bit means the counter is already allocated (for octets)
563 */
564 idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2);
565 if (idx >= priv->n_counters * 2) {
566 j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
567 if (j >= priv->n_counters) {
568 mutex_unlock(&priv->reg_mutex);
569 return -1;
570 }
571 set_bit(j, priv->octet_cntr_use_bm);
572 idx = j * 2;
573 set_bit(j * 2 + 1, priv->packet_cntr_use_bm);
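		/* Counter 2 * j is handed out now; its sibling 2 * j + 1 stays
		 * marked as free for the next allocation.
		 */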
574
575 } else {
576 clear_bit(idx, priv->packet_cntr_use_bm);
577 }
578
579 mutex_unlock(&priv->reg_mutex);
580
581 return idx;
582 }
583
584 /* Add an L2 nexthop entry for the L3 routing system / PIE forwarding in the SoC
585 * Use VID and MAC in rtl838x_l2_entry to identify either a free slot in the L2 hash table
 * or mark an existing entry as a nexthop by setting its nexthop bit
587 * Called from the L3 layer
588 * The index in the L2 hash table is filled into nh->l2_id;
589 */
590 int rtl83xx_l2_nexthop_add(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
591 {
592 struct rtl838x_l2_entry e;
593 u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid);
594 u32 key = priv->r->l2_hash_key(priv, seed);
595 int idx = -1;
596 u64 entry;
597
598 pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n",
599 __func__, nh->mac, nh->rvid, key, seed);
600
601 e.type = L2_UNICAST;
602 u64_to_ether_addr(nh->mac, &e.mac[0]);
603 e.port = nh->port;
604
605 /* Loop over all entries in the hash-bucket and over the second block on 93xx SoCs */
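	/* The computed idx appears to encode bucket and way as (key << 2) | way
	 * for the first four ways; higher ways address the second block on RTL93xx.
	 * rtl83xx_l2_nexthop_rm() reverses this split via l2_id >> 2 and l2_id & 0x3.
	 */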
606 for (int i = 0; i < priv->l2_bucket_size; i++) {
607 entry = priv->r->read_l2_entry_using_hash(key, i, &e);
608
609 if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
610 idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1
611 : ((key << 2) | i) & 0xffff;
612 break;
613 }
614 }
615
616 if (idx < 0) {
617 pr_err("%s: No more L2 forwarding entries available\n", __func__);
618 return -1;
619 }
620
621 /* Found an existing (e->valid is true) or empty entry, make it a nexthop entry */
622 nh->l2_id = idx;
623 if (e.valid) {
624 nh->port = e.port;
625 nh->vid = e.vid; /* Save VID */
626 nh->rvid = e.rvid;
627 nh->dev_id = e.stack_dev;
628 /* If the entry is already a valid next hop entry, don't change it */
629 if (e.next_hop)
630 return 0;
631 } else {
632 e.valid = true;
633 e.is_static = true;
634 e.rvid = nh->rvid;
635 e.is_ip_mc = false;
636 e.is_ipv6_mc = false;
637 e.block_da = false;
638 e.block_sa = false;
639 e.suspended = false;
640 e.age = 0; /* With port-ignore */
641 e.port = priv->port_ignore;
642 u64_to_ether_addr(nh->mac, &e.mac[0]);
643 }
644 e.next_hop = true;
645 e.nh_route_id = nh->id; /* NH route ID takes place of VID */
646 e.nh_vlan_target = false;
647
648 priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
649
650 return 0;
651 }
652
653 /* Removes a Layer 2 next hop entry in the forwarding database
654 * If it was static, the entire entry is removed, otherwise the nexthop bit is cleared
655 * and we wait until the entry ages out
656 */
657 int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
658 {
659 struct rtl838x_l2_entry e;
660 u32 key = nh->l2_id >> 2;
661 int i = nh->l2_id & 0x3;
	u64 entry = priv->r->read_l2_entry_using_hash(key, i, &e);
663
664 pr_debug("%s: id %d, key %d, index %d\n", __func__, nh->l2_id, key, i);
665 if (!e.valid) {
666 dev_err(priv->dev, "unknown nexthop, id %x\n", nh->l2_id);
667 return -1;
668 }
669
670 if (e.is_static)
671 e.valid = false;
672 e.next_hop = false;
673 e.vid = nh->vid; /* Restore VID */
674 e.rvid = nh->rvid;
675
676 priv->r->write_l2_entry_using_hash(key, i, &e);
677
678 return 0;
679 }
680
681 static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv,
682 struct net_device *ndev,
683 struct netdev_notifier_changeupper_info *info)
684 {
685 struct net_device *upper = info->upper_dev;
686 struct netdev_lag_upper_info *lag_upper_info = NULL;
687 int i, j, err;
688
689 if (!netif_is_lag_master(upper))
690 return 0;
691
692 mutex_lock(&priv->reg_mutex);
693
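	/* Find the LAG group already bound to this upper device, or the first
	 * free slot, and the switch port behind the notifying netdev.
	 */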
694 for (i = 0; i < priv->n_lags; i++) {
695 if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == upper))
696 break;
697 }
698 for (j = 0; j < priv->cpu_port; j++) {
699 if (priv->ports[j].dp->slave == ndev)
700 break;
701 }
702 if (j >= priv->cpu_port) {
703 err = -EINVAL;
704 goto out;
705 }
706
707 if (info->linking) {
708 lag_upper_info = info->upper_info;
709 if (!priv->lag_devs[i])
710 priv->lag_devs[i] = upper;
711 err = rtl83xx_lag_add(priv->ds, i, priv->ports[j].dp->index, lag_upper_info);
712 if (err) {
713 err = -EINVAL;
714 goto out;
715 }
716 } else {
		if (!priv->lag_devs[i]) {
			err = -EINVAL;
			goto out;
		}
719 err = rtl83xx_lag_del(priv->ds, i, priv->ports[j].dp->index);
720 if (err) {
721 err = -EINVAL;
722 goto out;
723 }
724 if (!priv->lags_port_members[i])
725 priv->lag_devs[i] = NULL;
726 }
727
728 out:
729 mutex_unlock(&priv->reg_mutex);
730
731 return 0;
732 }
733
/* Is the lower network device a DSA slave network device of our switch?
 * Unfortunately we cannot just follow dev->dsa_ptr as this is only set for the
 * DSA master device.
 */
738 int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv)
739 {
740 /* TODO: On 5.12:
741 * if(!dsa_slave_dev_check(dev)) {
742 * netdev_info(dev, "%s: not a DSA device.\n", __func__);
743 * return -EINVAL;
744 * }
745 */
746
747 for (int i = 0; i < priv->cpu_port; i++) {
748 if (!priv->ports[i].dp)
749 continue;
750 if (priv->ports[i].dp->slave == dev)
751 return i;
752 }
753
754 return -EINVAL;
755 }
756
757 static int rtl83xx_netdevice_event(struct notifier_block *this,
758 unsigned long event, void *ptr)
759 {
760 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
761 struct rtl838x_switch_priv *priv;
	int err = 0;
763
764 pr_debug("In: %s, event: %lu\n", __func__, event);
765
766 if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
767 return NOTIFY_DONE;
768
769 priv = container_of(this, struct rtl838x_switch_priv, nb);
770 switch (event) {
771 case NETDEV_CHANGEUPPER:
772 err = rtl83xx_handle_changeupper(priv, ndev, ptr);
773 break;
774 }
775
	if (err)
		return notifier_from_errno(err);
778
779 return NOTIFY_DONE;
780 }
781
static const struct rhashtable_params route_ht_params = {
783 .key_len = sizeof(u32),
784 .key_offset = offsetof(struct rtl83xx_route, gw_ip),
785 .head_offset = offsetof(struct rtl83xx_route, linkage),
786 };
787
788 /* Updates an L3 next hop entry in the ROUTING table */
789 static int rtl83xx_l3_nexthop_update(struct rtl838x_switch_priv *priv, __be32 ip_addr, u64 mac)
790 {
791 struct rtl83xx_route *r;
792 struct rhlist_head *tmp, *list;
793
794 rcu_read_lock();
795 list = rhltable_lookup(&priv->routes, &ip_addr, route_ht_params);
796 if (!list) {
797 rcu_read_unlock();
798 return -ENOENT;
799 }
800
801 rhl_for_each_entry_rcu(r, tmp, list, linkage) {
802 pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n",
803 __func__, &ip_addr, mac);
804
805 /* Reads the ROUTING table entry associated with the route */
806 priv->r->route_read(r->id, r);
807 pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len);
808
809 r->nh.mac = r->nh.gw = mac;
810 r->nh.port = priv->port_ignore;
811 r->nh.id = r->id;
812
813 /* Do we need to explicitly add a DMAC entry with the route's nh index? */
814 if (priv->r->set_l3_egress_mac)
815 priv->r->set_l3_egress_mac(r->id, mac);
816
817 /* Update ROUTING table: map gateway-mac and switch-mac id to route id */
818 rtl83xx_l2_nexthop_add(priv, &r->nh);
819
820 r->attr.valid = true;
821 r->attr.action = ROUTE_ACT_FORWARD;
822 r->attr.type = 0;
823 r->attr.hit = false; /* Reset route-used indicator */
824
825 /* Add PIE entry with dst_ip and prefix_len */
826 r->pr.dip = r->dst_ip;
827 r->pr.dip_m = inet_make_mask(r->prefix_len);
828
829 if (r->is_host_route) {
830 int slot = priv->r->find_l3_slot(r, false);
831
832 pr_info("%s: Got slot for route: %d\n", __func__, slot);
833 priv->r->host_route_write(slot, r);
834 } else {
835 priv->r->route_write(r->id, r);
836 r->pr.fwd_sel = true;
837 r->pr.fwd_data = r->nh.l2_id;
838 r->pr.fwd_act = PIE_ACT_ROUTE_UC;
839 }
840
841 if (priv->r->set_l3_nexthop)
842 priv->r->set_l3_nexthop(r->nh.id, r->nh.l2_id, r->nh.if_id);
843
844 if (r->pr.id < 0) {
845 r->pr.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
846 if (r->pr.packet_cntr >= 0) {
847 pr_info("Using packet counter %d\n", r->pr.packet_cntr);
848 r->pr.log_sel = true;
849 r->pr.log_data = r->pr.packet_cntr;
850 }
851 priv->r->pie_rule_add(priv, &r->pr);
852 } else {
853 int pkts = priv->r->packet_cntr_read(r->pr.packet_cntr);
854 pr_info("%s: total packets: %d\n", __func__, pkts);
855
856 priv->r->pie_rule_write(priv, r->pr.id, &r->pr);
857 }
858 }
859 rcu_read_unlock();
860
861 return 0;
862 }
863
864 static int rtl83xx_port_ipv4_resolve(struct rtl838x_switch_priv *priv,
865 struct net_device *dev, __be32 ip_addr)
866 {
867 struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev);
868 int err = 0;
869 u64 mac;
870
871 if (!n) {
872 n = neigh_create(&arp_tbl, &ip_addr, dev);
873 if (IS_ERR(n))
874 return PTR_ERR(n);
875 }
876
877 /* If the neigh is already resolved, then go ahead and
878 * install the entry, otherwise start the ARP process to
879 * resolve the neigh.
880 */
881 if (n->nud_state & NUD_VALID) {
882 mac = ether_addr_to_u64(n->ha);
883 pr_info("%s: resolved mac: %016llx\n", __func__, mac);
884 rtl83xx_l3_nexthop_update(priv, ip_addr, mac);
885 } else {
886 pr_info("%s: need to wait\n", __func__);
887 neigh_event_send(n, NULL);
888 }
889
890 neigh_release(n);
891
892 return err;
893 }
894
895 struct rtl83xx_walk_data {
896 struct rtl838x_switch_priv *priv;
897 int port;
898 };
899
900 static int rtl83xx_port_lower_walk(struct net_device *lower, struct netdev_nested_priv *_priv)
901 {
902 struct rtl83xx_walk_data *data = (struct rtl83xx_walk_data *)_priv->data;
903 struct rtl838x_switch_priv *priv = data->priv;
904 int ret = 0;
905 int index;
906
907 index = rtl83xx_port_is_under(lower, priv);
908 data->port = index;
909 if (index >= 0) {
910 pr_debug("Found DSA-port, index %d\n", index);
911 ret = 1;
912 }
913
914 return ret;
915 }
916
917 int rtl83xx_port_dev_lower_find(struct net_device *dev, struct rtl838x_switch_priv *priv)
918 {
919 struct rtl83xx_walk_data data;
920 struct netdev_nested_priv _priv;
921
922 data.priv = priv;
923 data.port = 0;
924 _priv.data = (void *)&data;
925
926 netdev_walk_all_lower_dev(dev, rtl83xx_port_lower_walk, &_priv);
927
928 return data.port;
929 }
930
931 static struct rtl83xx_route *rtl83xx_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
932 {
933 struct rtl83xx_route *r;
934 int idx = 0, err;
935
936 mutex_lock(&priv->reg_mutex);
937
938 idx = find_first_zero_bit(priv->route_use_bm, MAX_ROUTES);
939 pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
940
941 r = kzalloc(sizeof(*r), GFP_KERNEL);
942 if (!r) {
943 mutex_unlock(&priv->reg_mutex);
944 return r;
945 }
946
947 r->id = idx;
948 r->gw_ip = ip;
949 r->pr.id = -1; /* We still need to allocate a rule in HW */
950 r->is_host_route = false;
951
952 err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
953 if (err) {
954 pr_err("Could not insert new rule\n");
955 mutex_unlock(&priv->reg_mutex);
956 goto out_free;
957 }
958
959 set_bit(idx, priv->route_use_bm);
960
961 mutex_unlock(&priv->reg_mutex);
962
963 return r;
964
965 out_free:
966 kfree(r);
967
968 return NULL;
969 }
970
971
972 static struct rtl83xx_route *rtl83xx_host_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
973 {
974 struct rtl83xx_route *r;
975 int idx = 0, err;
976
977 mutex_lock(&priv->reg_mutex);
978
979 idx = find_first_zero_bit(priv->host_route_use_bm, MAX_HOST_ROUTES);
980 pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
981
982 r = kzalloc(sizeof(*r), GFP_KERNEL);
983 if (!r) {
984 mutex_unlock(&priv->reg_mutex);
985 return r;
986 }
987
988 /* We require a unique route ID irrespective of whether it is a prefix or host
989 * route (on RTL93xx) as we use this ID to associate a DMAC and next-hop entry
990 */
991 r->id = idx + MAX_ROUTES;
992
993 r->gw_ip = ip;
994 r->pr.id = -1; /* We still need to allocate a rule in HW */
995 r->is_host_route = true;
996
997 err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
998 if (err) {
999 pr_err("Could not insert new rule\n");
1000 mutex_unlock(&priv->reg_mutex);
1001 goto out_free;
1002 }
1003
1004 set_bit(idx, priv->host_route_use_bm);
1005
1006 mutex_unlock(&priv->reg_mutex);
1007
1008 return r;
1009
1010 out_free:
1011 kfree(r);
1012
1013 return NULL;
1014 }
1015
1016
1017
1018 static void rtl83xx_route_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_route *r)
1019 {
1020 int id;
1021
1022 if (rhltable_remove(&priv->routes, &r->linkage, route_ht_params))
1023 dev_warn(priv->dev, "Could not remove route\n");
1024
1025 if (r->is_host_route) {
1026 id = priv->r->find_l3_slot(r, false);
1027 pr_debug("%s: Got id for host route: %d\n", __func__, id);
1028 r->attr.valid = false;
1029 priv->r->host_route_write(id, r);
1030 clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm);
1031 } else {
1032 /* If there is a HW representation of the route, delete it */
1033 if (priv->r->route_lookup_hw) {
1034 id = priv->r->route_lookup_hw(r);
1035 pr_info("%s: Got id for prefix route: %d\n", __func__, id);
1036 r->attr.valid = false;
1037 priv->r->route_write(id, r);
1038 }
1039 clear_bit(r->id, priv->route_use_bm);
1040 }
1041
1042 kfree(r);
1043 }
1044
1045 static int rtl83xx_fib4_del(struct rtl838x_switch_priv *priv,
1046 struct fib_entry_notifier_info *info)
1047 {
1048 struct fib_nh *nh = fib_info_nh(info->fi, 0);
1049 struct rtl83xx_route *r;
1050 struct rhlist_head *tmp, *list;
1051
1052 pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
1053 rcu_read_lock();
1054 list = rhltable_lookup(&priv->routes, &nh->fib_nh_gw4, route_ht_params);
1055 if (!list) {
1056 rcu_read_unlock();
1057 pr_err("%s: no such gateway: %pI4\n", __func__, &nh->fib_nh_gw4);
1058 return -ENOENT;
1059 }
1060 rhl_for_each_entry_rcu(r, tmp, list, linkage) {
1061 if (r->dst_ip == info->dst && r->prefix_len == info->dst_len) {
1062 pr_info("%s: found a route with id %d, nh-id %d\n",
1063 __func__, r->id, r->nh.id);
1064 break;
1065 }
1066 }
1067 rcu_read_unlock();
1068
1069 rtl83xx_l2_nexthop_rm(priv, &r->nh);
1070
1071 pr_debug("%s: Releasing packet counter %d\n", __func__, r->pr.packet_cntr);
1072 set_bit(r->pr.packet_cntr, priv->packet_cntr_use_bm);
1073 priv->r->pie_rule_rm(priv, &r->pr);
1074
1075 rtl83xx_route_rm(priv, r);
1076
1077 nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
1078
1079 return 0;
1080 }
1081
/* On the RTL93xx, an L3 termination endpoint MAC address needs to be allocated;
 * this is the MAC address on which the router accepts packets to be routed.
 */
1085 static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac)
1086 {
1087 int free_mac = -1;
1088 struct rtl93xx_rt_mac m;
1089
1090 mutex_lock(&priv->reg_mutex);
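	/* Reuse an existing router MAC entry for this address if there is one,
	 * otherwise remember the first free slot.
	 */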
1091 for (int i = 0; i < MAX_ROUTER_MACS; i++) {
1092 priv->r->get_l3_router_mac(i, &m);
1093 if (free_mac < 0 && !m.valid) {
1094 free_mac = i;
1095 continue;
1096 }
1097 if (m.valid && m.mac == mac) {
1098 free_mac = i;
1099 break;
1100 }
1101 }
1102
1103 if (free_mac < 0) {
1104 pr_err("No free router MACs, cannot offload\n");
1105 mutex_unlock(&priv->reg_mutex);
1106 return -1;
1107 }
1108
1109 m.valid = true;
1110 m.mac = mac;
1111 m.p_type = 0; /* An individual port, not a trunk port */
1112 m.p_id = 0x3f; /* Listen on any port */
1113 m.p_id_mask = 0;
1114 m.vid = 0; /* Listen on any VLAN... */
1115 m.vid_mask = 0; /* ... so mask needs to be 0 */
1116 m.mac_mask = 0xffffffffffffULL; /* We want an exact match of the interface MAC */
1117 m.action = L3_FORWARD; /* Route the packet */
1118 priv->r->set_l3_router_mac(free_mac, &m);
1119
1120 mutex_unlock(&priv->reg_mutex);
1121
1122 return 0;
1123 }
1124
1125 static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan)
1126 {
1127 int free_mac = -1;
1128 struct rtl838x_l3_intf intf;
1129 u64 m;
1130
1131 mutex_lock(&priv->reg_mutex);
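	/* Return an existing egress interface for this source MAC if present,
	 * otherwise remember the first free SMAC slot.
	 */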
1132 for (int i = 0; i < MAX_SMACS; i++) {
1133 m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i);
1134 if (free_mac < 0 && !m) {
1135 free_mac = i;
1136 continue;
1137 }
1138 if (m == mac) {
1139 mutex_unlock(&priv->reg_mutex);
1140 return i;
1141 }
1142 }
1143
	if (free_mac < 0) {
		pr_err("No free egress interface, cannot offload\n");
		mutex_unlock(&priv->reg_mutex);
		return -1;
	}
1148
1149 /* Set up default egress interface 1 */
1150 intf.vid = vlan;
1151 intf.smac_idx = free_mac;
1152 intf.ip4_mtu_id = 1;
1153 intf.ip6_mtu_id = 1;
1154 intf.ttl_scope = 1; /* TTL */
1155 intf.hl_scope = 1; /* Hop Limit */
1156 intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; /* FORWARD */
1157 intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; /* FORWARD; */
1158 priv->r->set_l3_egress_intf(free_mac, &intf);
1159
1160 priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac);
1161
1162 mutex_unlock(&priv->reg_mutex);
1163
1164 return free_mac;
1165 }
1166
1167 static int rtl83xx_fib4_add(struct rtl838x_switch_priv *priv,
1168 struct fib_entry_notifier_info *info)
1169 {
1170 struct fib_nh *nh = fib_info_nh(info->fi, 0);
1171 struct net_device *dev = fib_info_nh(info->fi, 0)->fib_nh_dev;
1172 int port;
1173 struct rtl83xx_route *r;
1174 bool to_localhost;
1175 int vlan = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0;
1176
1177 pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
1178 if (!info->dst) {
1179 pr_info("Not offloading default route for now\n");
1180 return 0;
1181 }
1182
1183 pr_debug("GW: %pI4, interface name %s, mac %016llx, vlan %d\n", &nh->fib_nh_gw4, dev->name,
1184 ether_addr_to_u64(dev->dev_addr), vlan
1185 );
1186
1187 port = rtl83xx_port_dev_lower_find(dev, priv);
1188 if (port < 0)
1189 return -1;
1190
1191 /* For now we only work with routes that have a gateway and are not ourself */
1192 /* if ((!nh->fib_nh_gw4) && (info->dst_len != 32)) */
1193 /* return 0; */
1194
1195 if ((info->dst & 0xff) == 0xff)
1196 return 0;
1197
1198 /* Do not offload routes to 192.168.100.x */
1199 if ((info->dst & 0xffffff00) == 0xc0a86400)
1200 return 0;
1201
1202 /* Do not offload routes to 127.x.x.x */
1203 if ((info->dst & 0xff000000) == 0x7f000000)
1204 return 0;
1205
	/* Allocate a route or host-route entry (if the hardware supports host routes) */
1207 if (info->dst_len == 32 && priv->r->host_route_write)
1208 r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4);
1209 else
1210 r = rtl83xx_route_alloc(priv, nh->fib_nh_gw4);
1211
1212 if (!r) {
1213 pr_err("%s: No more free route entries\n", __func__);
1214 return -1;
1215 }
1216
1217 r->dst_ip = info->dst;
1218 r->prefix_len = info->dst_len;
1219 r->nh.rvid = vlan;
1220 to_localhost = !nh->fib_nh_gw4;
1221
1222 if (priv->r->set_l3_router_mac) {
1223 u64 mac = ether_addr_to_u64(dev->dev_addr);
1224
1225 pr_debug("Local route and router mac %016llx\n", mac);
1226
1227 if (rtl83xx_alloc_router_mac(priv, mac))
1228 goto out_free_rt;
1229
1230 /* vid = 0: Do not care about VID */
1231 r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan);
1232 if (r->nh.if_id < 0)
1233 goto out_free_rmac;
1234
1235 if (to_localhost) {
1236 int slot;
1237
1238 r->nh.mac = mac;
1239 r->nh.port = priv->port_ignore;
1240 r->attr.valid = true;
1241 r->attr.action = ROUTE_ACT_TRAP2CPU;
1242 r->attr.type = 0;
1243
1244 slot = priv->r->find_l3_slot(r, false);
1245 pr_debug("%s: Got slot for route: %d\n", __func__, slot);
1246 priv->r->host_route_write(slot, r);
1247 }
1248 }
1249
1250 /* We need to resolve the mac address of the GW */
1251 if (!to_localhost)
1252 rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4);
1253
1254 nh->fib_nh_flags |= RTNH_F_OFFLOAD;
1255
1256 return 0;
1257
1258 out_free_rmac:
1259 out_free_rt:
1260 return 0;
1261 }
1262
1263 static int rtl83xx_fib6_add(struct rtl838x_switch_priv *priv,
1264 struct fib6_entry_notifier_info *info)
1265 {
1266 pr_debug("In %s\n", __func__);
1267 /* nh->fib_nh_flags |= RTNH_F_OFFLOAD; */
1268
1269 return 0;
1270 }
1271
1272 struct net_event_work {
1273 struct work_struct work;
1274 struct rtl838x_switch_priv *priv;
1275 u64 mac;
1276 u32 gw_addr;
1277 };
1278
1279 static void rtl83xx_net_event_work_do(struct work_struct *work)
1280 {
1281 struct net_event_work *net_work =
1282 container_of(work, struct net_event_work, work);
1283 struct rtl838x_switch_priv *priv = net_work->priv;
1284
1285 rtl83xx_l3_nexthop_update(priv, net_work->gw_addr, net_work->mac);
1286
1287 kfree(net_work);
1288 }
1289
1290 static int rtl83xx_netevent_event(struct notifier_block *this,
1291 unsigned long event, void *ptr)
1292 {
1293 struct rtl838x_switch_priv *priv;
1294 struct net_device *dev;
1295 struct neighbour *n = ptr;
	int port;
1297 struct net_event_work *net_work;
1298
1299 priv = container_of(this, struct rtl838x_switch_priv, ne_nb);
1300
1301 switch (event) {
1302 case NETEVENT_NEIGH_UPDATE:
1303 if (n->tbl != &arp_tbl)
1304 return NOTIFY_DONE;
1305 dev = n->dev;
1306 port = rtl83xx_port_dev_lower_find(dev, priv);
1307 if (port < 0 || !(n->nud_state & NUD_VALID)) {
			pr_debug("%s: Neighbour invalid, not updating\n", __func__);
1309 return NOTIFY_DONE;
1310 }
1311
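		/* Neighbour updates can arrive in atomic context, hence the
		 * GFP_ATOMIC allocation and the deferral of the nexthop update
		 * to process context via a work item.
		 */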
1312 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
1313 if (!net_work)
1314 return NOTIFY_BAD;
1315
1316 INIT_WORK(&net_work->work, rtl83xx_net_event_work_do);
1317 net_work->priv = priv;
1318
1319 net_work->mac = ether_addr_to_u64(n->ha);
1320 net_work->gw_addr = *(__be32 *) n->primary_key;
1321
1322 pr_debug("%s: updating neighbour on port %d, mac %016llx\n",
1323 __func__, port, net_work->mac);
		schedule_work(&net_work->work);
1327 break;
1328 }
1329
1330 return NOTIFY_DONE;
1331 }
1332
1333 struct rtl83xx_fib_event_work {
1334 struct work_struct work;
1335 union {
1336 struct fib_entry_notifier_info fen_info;
1337 struct fib6_entry_notifier_info fen6_info;
1338 struct fib_rule_notifier_info fr_info;
1339 };
1340 struct rtl838x_switch_priv *priv;
1341 bool is_fib6;
1342 unsigned long event;
1343 };
1344
1345 static void rtl83xx_fib_event_work_do(struct work_struct *work)
1346 {
1347 struct rtl83xx_fib_event_work *fib_work =
1348 container_of(work, struct rtl83xx_fib_event_work, work);
1349 struct rtl838x_switch_priv *priv = fib_work->priv;
1350 struct fib_rule *rule;
1351 int err;
1352
1353 /* Protect internal structures from changes */
1354 rtnl_lock();
1355 pr_debug("%s: doing work, event %ld\n", __func__, fib_work->event);
1356 switch (fib_work->event) {
1357 case FIB_EVENT_ENTRY_ADD:
1358 case FIB_EVENT_ENTRY_REPLACE:
1359 case FIB_EVENT_ENTRY_APPEND:
1360 if (fib_work->is_fib6) {
1361 err = rtl83xx_fib6_add(priv, &fib_work->fen6_info);
1362 } else {
1363 err = rtl83xx_fib4_add(priv, &fib_work->fen_info);
1364 fib_info_put(fib_work->fen_info.fi);
1365 }
1366 if (err)
1367 pr_err("%s: FIB4 failed\n", __func__);
1368 break;
1369 case FIB_EVENT_ENTRY_DEL:
1370 rtl83xx_fib4_del(priv, &fib_work->fen_info);
1371 fib_info_put(fib_work->fen_info.fi);
1372 break;
1373 case FIB_EVENT_RULE_ADD:
1374 case FIB_EVENT_RULE_DEL:
1375 rule = fib_work->fr_info.rule;
1376 if (!fib4_rule_default(rule))
1377 pr_err("%s: FIB4 default rule failed\n", __func__);
1378 fib_rule_put(rule);
1379 break;
1380 }
1381 rtnl_unlock();
1382 kfree(fib_work);
1383 }
1384
1385 /* Called with rcu_read_lock() */
1386 static int rtl83xx_fib_event(struct notifier_block *this, unsigned long event, void *ptr)
1387 {
1388 struct fib_notifier_info *info = ptr;
1389 struct rtl838x_switch_priv *priv;
1390 struct rtl83xx_fib_event_work *fib_work;
1391
1392 if ((info->family != AF_INET && info->family != AF_INET6 &&
1393 info->family != RTNL_FAMILY_IPMR &&
1394 info->family != RTNL_FAMILY_IP6MR))
1395 return NOTIFY_DONE;
1396
1397 priv = container_of(this, struct rtl838x_switch_priv, fib_nb);
1398
1399 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
1400 if (!fib_work)
1401 return NOTIFY_BAD;
1402
1403 INIT_WORK(&fib_work->work, rtl83xx_fib_event_work_do);
1404 fib_work->priv = priv;
1405 fib_work->event = event;
1406 fib_work->is_fib6 = false;
1407
1408 switch (event) {
1409 case FIB_EVENT_ENTRY_ADD:
1410 case FIB_EVENT_ENTRY_REPLACE:
1411 case FIB_EVENT_ENTRY_APPEND:
1412 case FIB_EVENT_ENTRY_DEL:
1413 pr_debug("%s: FIB_ENTRY ADD/DEL, event %ld\n", __func__, event);
1414 if (info->family == AF_INET) {
1415 struct fib_entry_notifier_info *fen_info = ptr;
1416
1417 if (fen_info->fi->fib_nh_is_v6) {
1418 NL_SET_ERR_MSG_MOD(info->extack,
1419 "IPv6 gateway with IPv4 route is not supported");
1420 kfree(fib_work);
1421 return notifier_from_errno(-EINVAL);
1422 }
1423
1424 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
			/* Take a reference on fib_info to prevent it from being
1426 * freed while work is queued. Release it afterwards.
1427 */
1428 fib_info_hold(fib_work->fen_info.fi);
1429
1430 } else if (info->family == AF_INET6) {
1431 //struct fib6_entry_notifier_info *fen6_info = ptr;
			pr_warn("%s: FIB_ENTRY ADD/DEL for IPv6 not supported\n", __func__);
1433 kfree(fib_work);
1434 return NOTIFY_DONE;
1435 }
1436 break;
1437
1438 case FIB_EVENT_RULE_ADD:
1439 case FIB_EVENT_RULE_DEL:
1440 pr_debug("%s: FIB_RULE ADD/DEL, event: %ld\n", __func__, event);
1441 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
1442 fib_rule_get(fib_work->fr_info.rule);
1443 break;
1444 }
1445
1446 schedule_work(&fib_work->work);
1447
1448 return NOTIFY_DONE;
1449 }
1450
1451 static int __init rtl83xx_sw_probe(struct platform_device *pdev)
1452 {
1453 int err = 0;
1454 struct rtl838x_switch_priv *priv;
1455 struct device *dev = &pdev->dev;
1456 u64 bpdu_mask;
1457
1458 pr_debug("Probing RTL838X switch device\n");
1459 if (!pdev->dev.of_node) {
1460 dev_err(dev, "No DT found\n");
1461 return -EINVAL;
1462 }
1463
1464 /* Initialize access to RTL switch tables */
1465 rtl_table_init();
1466
1467 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1468 if (!priv)
1469 return -ENOMEM;
1470
1471 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
1472
1473 if (!priv->ds)
1474 return -ENOMEM;
1475 priv->ds->dev = dev;
1476 priv->ds->priv = priv;
1477 priv->ds->ops = &rtl83xx_switch_ops;
1478 priv->ds->needs_standalone_vlan_filtering = true;
1479 priv->dev = dev;
1480
1481 mutex_init(&priv->reg_mutex);
1482
1483 priv->family_id = soc_info.family;
1484 priv->id = soc_info.id;
1485 switch(soc_info.family) {
1486 case RTL8380_FAMILY_ID:
1487 priv->ds->ops = &rtl83xx_switch_ops;
1488 priv->cpu_port = RTL838X_CPU_PORT;
1489 priv->port_mask = 0x1f;
1490 priv->port_width = 1;
1491 priv->irq_mask = 0x0FFFFFFF;
1492 priv->r = &rtl838x_reg;
1493 priv->ds->num_ports = 29;
1494 priv->fib_entries = 8192;
1495 rtl8380_get_version(priv);
1496 priv->n_lags = 8;
1497 priv->l2_bucket_size = 4;
1498 priv->n_pie_blocks = 12;
1499 priv->port_ignore = 0x1f;
1500 priv->n_counters = 128;
1501 break;
1502 case RTL8390_FAMILY_ID:
1503 priv->ds->ops = &rtl83xx_switch_ops;
1504 priv->cpu_port = RTL839X_CPU_PORT;
1505 priv->port_mask = 0x3f;
1506 priv->port_width = 2;
1507 priv->irq_mask = 0xFFFFFFFFFFFFFULL;
1508 priv->r = &rtl839x_reg;
1509 priv->ds->num_ports = 53;
1510 priv->fib_entries = 16384;
1511 rtl8390_get_version(priv);
1512 priv->n_lags = 16;
1513 priv->l2_bucket_size = 4;
1514 priv->n_pie_blocks = 18;
1515 priv->port_ignore = 0x3f;
1516 priv->n_counters = 1024;
1517 break;
1518 case RTL9300_FAMILY_ID:
1519 priv->ds->ops = &rtl930x_switch_ops;
1520 priv->cpu_port = RTL930X_CPU_PORT;
1521 priv->port_mask = 0x1f;
1522 priv->port_width = 1;
1523 priv->irq_mask = 0x0FFFFFFF;
1524 priv->r = &rtl930x_reg;
1525 priv->ds->num_ports = 29;
1526 priv->fib_entries = 16384;
1527 priv->version = RTL8390_VERSION_A;
1528 priv->n_lags = 16;
1529 sw_w32(1, RTL930X_ST_CTRL);
1530 priv->l2_bucket_size = 8;
1531 priv->n_pie_blocks = 16;
1532 priv->port_ignore = 0x3f;
1533 priv->n_counters = 2048;
1534 break;
1535 case RTL9310_FAMILY_ID:
1536 priv->ds->ops = &rtl930x_switch_ops;
1537 priv->cpu_port = RTL931X_CPU_PORT;
1538 priv->port_mask = 0x3f;
1539 priv->port_width = 2;
1540 priv->irq_mask = 0xFFFFFFFFFFFFFULL;
1541 priv->r = &rtl931x_reg;
1542 priv->ds->num_ports = 57;
1543 priv->fib_entries = 16384;
1544 priv->version = RTL8390_VERSION_A;
1545 priv->n_lags = 16;
1546 priv->l2_bucket_size = 8;
1547 break;
1548 }
1549 pr_debug("Chip version %c\n", priv->version);
1550
1551 err = rtl83xx_mdio_probe(priv);
1552 if (err) {
		/* Probing fails the first time because the Ethernet driver is not yet
		 * initialized. Use this to disable traffic in case the bootloader left it on.
		 */
1556 return err;
1557 }
1558
1559 err = dsa_register_switch(priv->ds);
1560 if (err) {
1561 dev_err(dev, "Error registering switch: %d\n", err);
1562 return err;
1563 }
1564
1565 /* dsa_to_port returns dsa_port from the port list in
1566 * dsa_switch_tree, the tree is built when the switch
1567 * is registered by dsa_register_switch
1568 */
1569 for (int i = 0; i <= priv->cpu_port; i++)
1570 priv->ports[i].dp = dsa_to_port(priv->ds, i);
1571
1572 /* Enable link and media change interrupts. Are the SERDES masks needed? */
1573 sw_w32_mask(0, 3, priv->r->isr_glb_src);
1574
1575 priv->r->set_port_reg_le(priv->irq_mask, priv->r->isr_port_link_sts_chg);
1576 priv->r->set_port_reg_le(priv->irq_mask, priv->r->imr_port_link_sts_chg);
1577
1578 priv->link_state_irq = platform_get_irq(pdev, 0);
1579 pr_info("LINK state irq: %d\n", priv->link_state_irq);
1580 switch (priv->family_id) {
1581 case RTL8380_FAMILY_ID:
1582 err = request_irq(priv->link_state_irq, rtl838x_switch_irq,
1583 IRQF_SHARED, "rtl838x-link-state", priv->ds);
1584 break;
1585 case RTL8390_FAMILY_ID:
1586 err = request_irq(priv->link_state_irq, rtl839x_switch_irq,
1587 IRQF_SHARED, "rtl839x-link-state", priv->ds);
1588 break;
1589 case RTL9300_FAMILY_ID:
1590 err = request_irq(priv->link_state_irq, rtl930x_switch_irq,
1591 IRQF_SHARED, "rtl930x-link-state", priv->ds);
1592 break;
1593 case RTL9310_FAMILY_ID:
1594 err = request_irq(priv->link_state_irq, rtl931x_switch_irq,
1595 IRQF_SHARED, "rtl931x-link-state", priv->ds);
1596 break;
1597 }
1598 if (err) {
1599 dev_err(dev, "Error setting up switch interrupt.\n");
1600 /* Need to free allocated switch here */
1601 }
1602
	/* Enable switch interrupts; on RTL931X the IRQ is always enabled globally */
1604 if (soc_info.family != RTL9310_FAMILY_ID)
1605 sw_w32(0x1, priv->r->imr_glb);
1606
1607 rtl83xx_get_l2aging(priv);
1608
1609 rtl83xx_setup_qos(priv);
1610
1611 priv->r->l3_setup(priv);
1612
1613 /* Clear all destination ports for mirror groups */
1614 for (int i = 0; i < 4; i++)
1615 priv->mirror_group_ports[i] = -1;
1616
1617 /* Register netdevice event callback to catch changes in link aggregation groups */
1618 priv->nb.notifier_call = rtl83xx_netdevice_event;
1619 if (register_netdevice_notifier(&priv->nb)) {
1620 priv->nb.notifier_call = NULL;
1621 dev_err(dev, "Failed to register LAG netdev notifier\n");
1622 goto err_register_nb;
1623 }
1624
1625 /* Initialize hash table for L3 routing */
1626 rhltable_init(&priv->routes, &route_ht_params);
1627
1628 /* Register netevent notifier callback to catch notifications about neighboring
1629 * changes to update nexthop entries for L3 routing.
1630 */
1631 priv->ne_nb.notifier_call = rtl83xx_netevent_event;
1632 if (register_netevent_notifier(&priv->ne_nb)) {
1633 priv->ne_nb.notifier_call = NULL;
1634 dev_err(dev, "Failed to register netevent notifier\n");
1635 goto err_register_ne_nb;
1636 }
1637
1638 priv->fib_nb.notifier_call = rtl83xx_fib_event;
1639
	/* Register a Forwarding Information Base notifier to offload routes
	 * where possible.
	 * Only FIBs pointing to our own netdevs are programmed into
	 * the device, so there is no need to pass a callback.
	 */
1645 err = register_fib_notifier(&init_net, &priv->fib_nb, NULL, NULL);
1646 if (err)
1647 goto err_register_fib_nb;
1648
1649 /* TODO: put this into l2_setup() */
1650 /* Flood BPDUs to all ports including cpu-port */
1651 if (soc_info.family != RTL9300_FAMILY_ID) {
1652 bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF;
1653 priv->r->set_port_reg_be(bpdu_mask, priv->r->rma_bpdu_fld_pmask);
1654
1655 /* TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs */
1656 sw_w32(7, priv->r->spcl_trap_eapol_ctrl);
1657
1658 rtl838x_dbgfs_init(priv);
1659 } else {
1660 rtl930x_dbgfs_init(priv);
1661 }
1662
1663 return 0;
1664
1665 err_register_fib_nb:
1666 unregister_netevent_notifier(&priv->ne_nb);
1667 err_register_ne_nb:
1668 unregister_netdevice_notifier(&priv->nb);
1669 err_register_nb:
1670 return err;
1671 }
1672
1673 static int rtl83xx_sw_remove(struct platform_device *pdev)
1674 {
1675 /* TODO: */
1676 pr_debug("Removing platform driver for rtl83xx-sw\n");
1677
1678 return 0;
1679 }
1680
1681 static const struct of_device_id rtl83xx_switch_of_ids[] = {
1682 { .compatible = "realtek,rtl83xx-switch"},
1683 { /* sentinel */ }
1684 };
1685
1686
1687 MODULE_DEVICE_TABLE(of, rtl83xx_switch_of_ids);
1688
1689 static struct platform_driver rtl83xx_switch_driver = {
1690 .probe = rtl83xx_sw_probe,
1691 .remove = rtl83xx_sw_remove,
1692 .driver = {
1693 .name = "rtl83xx-switch",
1694 .pm = NULL,
1695 .of_match_table = rtl83xx_switch_of_ids,
1696 },
1697 };
1698
1699 module_platform_driver(rtl83xx_switch_driver);
1700
1701 MODULE_AUTHOR("B. Koblitz");
1702 MODULE_DESCRIPTION("RTL83XX SoC Switch Driver");
1703 MODULE_LICENSE("GPL");