realtek: Whitespace and codestyle cleanup
[openwrt/staging/jow.git] target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c
1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #include <linux/of_mdio.h>
4 #include <linux/of_platform.h>
5 #include <net/arp.h>
6 #include <net/nexthop.h>
7 #include <net/neighbour.h>
8 #include <net/netevent.h>
9 #include <linux/inetdevice.h>
10 #include <linux/rhashtable.h>
11 #include <linux/of_net.h>
12 #include <asm/mach-rtl838x/mach-rtl83xx.h>
13
14 #include "rtl83xx.h"
15
16 extern struct rtl83xx_soc_info soc_info;
17
18 extern const struct rtl838x_reg rtl838x_reg;
19 extern const struct rtl838x_reg rtl839x_reg;
20 extern const struct rtl838x_reg rtl930x_reg;
21 extern const struct rtl838x_reg rtl931x_reg;
22
23 extern const struct dsa_switch_ops rtl83xx_switch_ops;
24 extern const struct dsa_switch_ops rtl930x_switch_ops;
25
26 DEFINE_MUTEX(smi_lock);
27
28 int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port)
29 {
30 u32 msti = 0;
31 u32 port_state[4];
32 int index, bit;
33 int pos = port;
34 int n = priv->port_width << 1;
35
36 /* Ports at or above the CPU port can never be configured */
37 if (port >= priv->cpu_port)
38 return -1;
39
40 mutex_lock(&priv->reg_mutex);
41
42 /* For the RTL839x and following, the bits are left-aligned in the 64/128 bit field */
43 if (priv->family_id == RTL8390_FAMILY_ID)
44 pos += 12;
45 if (priv->family_id == RTL9300_FAMILY_ID)
46 pos += 3;
47 if (priv->family_id == RTL9310_FAMILY_ID)
48 pos += 8;
49
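/* Each port state occupies two bits; the per-port states are packed into an
 * array of 32-bit registers filled starting from the most significant word
 */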
50 index = n - (pos >> 4) - 1;
51 bit = (pos << 1) % 32;
52
53 priv->r->stp_get(priv, msti, port_state);
54
55 mutex_unlock(&priv->reg_mutex);
56
57 return (port_state[index] >> bit) & 3;
58 }
59
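/* Table access register descriptors. The TBL_DESC() fields are used below as:
 * control register address, first data register address, number of data
 * registers, command/execute bit position, table-type bit position and the
 * read/write mode flag (inverted command semantics on RTL838x).
 */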
60 static struct table_reg rtl838x_tbl_regs[] = {
61 TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1), // RTL8380_TBL_L2
62 TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1), // RTL8380_TBL_0
63 TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1), // RTL8380_TBL_1
64
65 TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0), // RTL8390_TBL_L2
66 TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0), // RTL8390_TBL_0
67 TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0), // RTL8390_TBL_1
68 TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0), // RTL8390_TBL_2
69
70 TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0), // RTL9300_TBL_L2
71 TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0), // RTL9300_TBL_0
72 TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0), // RTL9300_TBL_1
73 TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0), // RTL9300_TBL_2
74 TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0), // RTL9300_TBL_HSB
75 TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0), // RTL9300_TBL_HSA
76
77 TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0), // RTL9310_TBL_0
78 TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0), // RTL9310_TBL_1
79 TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0), // RTL9310_TBL_2
80 TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0), // RTL9310_TBL_3
81 TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0), // RTL9310_TBL_4
82 TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0), // RTL9310_TBL_5
83 };
84
85 void rtl_table_init(void)
86 {
87 int i;
88
89 for (i = 0; i < RTL_TBL_END; i++)
90 mutex_init(&rtl838x_tbl_regs[i].lock);
91 }
92
93 /* Request access to table t in table access register r
94 * Returns a handle to a lock for that table
95 */
96 struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t)
97 {
98 if (r >= RTL_TBL_END)
99 return NULL;
100
101 if (t >= BIT(rtl838x_tbl_regs[r].c_bit - rtl838x_tbl_regs[r].t_bit))
102 return NULL;
103
104 mutex_lock(&rtl838x_tbl_regs[r].lock);
105 rtl838x_tbl_regs[r].tbl = t;
106
107 return &rtl838x_tbl_regs[r];
108 }
109
110 /* Release a table r, unlock the corresponding lock */
111 void rtl_table_release(struct table_reg *r)
112 {
113 if (!r)
114 return;
115
116 // pr_info("Unlocking %08x\n", (u32)r);
117 mutex_unlock(&r->lock);
118 // pr_info("Unlock done\n");
119 }
120
121 static int rtl_table_exec(struct table_reg *r, bool is_write, int idx)
122 {
123 int ret = 0;
124 u32 cmd, val;
125
126 /* Read/write bit has inverted meaning on RTL838x */
127 if (r->rmode)
128 cmd = is_write ? 0 : BIT(r->c_bit);
129 else
130 cmd = is_write ? BIT(r->c_bit) : 0;
131
132 cmd |= BIT(r->c_bit + 1); /* Execute bit */
133 cmd |= r->tbl << r->t_bit; /* Table type */
134 cmd |= idx & (BIT(r->t_bit) - 1); /* Index */
135
136 sw_w32(cmd, r->addr);
137
138 ret = readx_poll_timeout(sw_r32, r->addr, val,
139 !(val & BIT(r->c_bit + 1)), 20, 10000);
140 if (ret)
141 pr_err("%s: timeout\n", __func__);
142
143 return ret;
144 }
145
146 /* Reads table index idx into the data registers of the table */
147 int rtl_table_read(struct table_reg *r, int idx)
148 {
149 return rtl_table_exec(r, false, idx);
150 }
151
152 /* Writes the content of the table data registers into the table at index idx */
153 int rtl_table_write(struct table_reg *r, int idx)
154 {
155 return rtl_table_exec(r, true, idx);
156 }
157
158 /* Returns the address of the ith data register of table register r
159 * the address is relative to the beginning of the Switch-IO block at 0xbb000000
160 */
161 inline u16 rtl_table_data(struct table_reg *r, int i)
162 {
163 if (i >= r->max_data)
164 i = r->max_data - 1;
165 return r->data + i * 4;
166 }
167
168 inline u32 rtl_table_data_r(struct table_reg *r, int i)
169 {
170 return sw_r32(rtl_table_data(r, i));
171 }
172
173 inline void rtl_table_data_w(struct table_reg *r, u32 v, int i)
174 {
175 sw_w32(v, rtl_table_data(r, i));
176 }
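/* Typical access pattern (sketch, table and indices purely illustrative):
 *
 *	struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0);
 *
 *	if (q && !rtl_table_read(q, 5))
 *		val = rtl_table_data_r(q, 0);
 *	rtl_table_release(q);
 *
 * rtl_table_get() takes the table lock, rtl_table_read()/rtl_table_write()
 * transfer one entry between the table and its data registers, and
 * rtl_table_release() drops the lock again.
 */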
177
178 /* Port register accessor functions for the RTL838x and RTL930X SoCs */
179 void rtl838x_mask_port_reg(u64 clear, u64 set, int reg)
180 {
181 sw_w32_mask((u32)clear, (u32)set, reg);
182 }
183
184 void rtl838x_set_port_reg(u64 set, int reg)
185 {
186 sw_w32((u32)set, reg);
187 }
188
189 u64 rtl838x_get_port_reg(int reg)
190 {
191 return ((u64)sw_r32(reg));
192 }
193
194 /* Port register accessor functions for the RTL839x and RTL931X SoCs */
195 void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg)
196 {
197 sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg);
198 sw_w32_mask((u32)(clear & 0xffffffff), (u32)(set & 0xffffffff), reg + 4);
199 }
200
201 u64 rtl839x_get_port_reg_be(int reg)
202 {
203 u64 v = sw_r32(reg);
204
205 v <<= 32;
206 v |= sw_r32(reg + 4);
207
208 return v;
209 }
210
211 void rtl839x_set_port_reg_be(u64 set, int reg)
212 {
213 sw_w32(set >> 32, reg);
214 sw_w32(set & 0xffffffff, reg + 4);
215 }
216
217 void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg)
218 {
219 sw_w32_mask((u32)clear, (u32)set, reg);
220 sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg + 4);
221 }
222
223 void rtl839x_set_port_reg_le(u64 set, int reg)
224 {
225 sw_w32(set, reg);
226 sw_w32(set >> 32, reg + 4);
227 }
228
229 u64 rtl839x_get_port_reg_le(int reg)
230 {
231 u64 v = sw_r32(reg + 4);
232
233 v <<= 32;
234 v |= sw_r32(reg);
235
236 return v;
237 }
238
239 int read_phy(u32 port, u32 page, u32 reg, u32 *val)
240 {
241 switch (soc_info.family) {
242 case RTL8380_FAMILY_ID:
243 return rtl838x_read_phy(port, page, reg, val);
244 case RTL8390_FAMILY_ID:
245 return rtl839x_read_phy(port, page, reg, val);
246 case RTL9300_FAMILY_ID:
247 return rtl930x_read_phy(port, page, reg, val);
248 case RTL9310_FAMILY_ID:
249 return rtl931x_read_phy(port, page, reg, val);
250 }
251
252 return -1;
253 }
254
255 int write_phy(u32 port, u32 page, u32 reg, u32 val)
256 {
257 switch (soc_info.family) {
258 case RTL8380_FAMILY_ID:
259 return rtl838x_write_phy(port, page, reg, val);
260 case RTL8390_FAMILY_ID:
261 return rtl839x_write_phy(port, page, reg, val);
262 case RTL9300_FAMILY_ID:
263 return rtl930x_write_phy(port, page, reg, val);
264 case RTL9310_FAMILY_ID:
265 return rtl931x_write_phy(port, page, reg, val);
266 }
267
268 return -1;
269 }
270
271 static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
272 {
273 struct device *dev = priv->dev;
274 struct device_node *dn, *phy_node, *mii_np = dev->of_node;
275 struct mii_bus *bus;
276 int ret;
277 u32 pn;
278
279 pr_debug("In %s\n", __func__);
280 mii_np = of_find_compatible_node(NULL, NULL, "realtek,rtl838x-mdio");
281 if (mii_np) {
282 pr_debug("Found compatible MDIO node!\n");
283 } else {
284 dev_err(priv->dev, "no realtek,rtl838x-mdio node found\n");
285 return -ENODEV;
286 }
287
288 priv->mii_bus = of_mdio_find_bus(mii_np);
289 if (!priv->mii_bus) {
290 pr_debug("Deferring probe of mdio bus\n");
291 return -EPROBE_DEFER;
292 }
293 if (!of_device_is_available(mii_np))
294 return -ENODEV;
295
296 bus = devm_mdiobus_alloc(priv->ds->dev);
297 if (!bus)
298 return -ENOMEM;
299
300 bus->name = "rtl838x slave mii";
301
302 /* Since the NIC driver is loaded first, we can use the mdio rw functions
303 * assigned there.
304 */
305 bus->read = priv->mii_bus->read;
306 bus->write = priv->mii_bus->write;
307 bus->read_paged = priv->mii_bus->read_paged;
308 bus->write_paged = priv->mii_bus->write_paged;
309 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", bus->name, dev->id);
310
311 bus->parent = dev;
312 priv->ds->slave_mii_bus = bus;
313 priv->ds->slave_mii_bus->priv = priv->mii_bus->priv;
314 priv->ds->slave_mii_bus->access_capabilities = priv->mii_bus->access_capabilities;
315
316 ret = mdiobus_register(priv->ds->slave_mii_bus);
317 if (ret) {
318 of_node_put(mii_np);
319 return ret;
320 }
321
322 dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
323 if (!dn) {
324 dev_err(priv->dev, "No RTL switch node in DTS\n");
325 return -ENODEV;
326 }
327
328 for_each_node_by_name(dn, "port") {
329 phy_interface_t interface;
330 u32 led_set;
331
332 if (!of_device_is_available(dn))
333 continue;
334
335 if (of_property_read_u32(dn, "reg", &pn))
336 continue;
337
338 phy_node = of_parse_phandle(dn, "phy-handle", 0);
339 if (!phy_node) {
340 if (pn != priv->cpu_port)
341 dev_err(priv->dev, "Port node %d misses phy-handle\n", pn);
342 continue;
343 }
344
345 if (of_property_read_u32(phy_node, "sds", &priv->ports[pn].sds_num))
346 priv->ports[pn].sds_num = -1;
347 pr_debug("%s port %d has SDS %d\n", __func__, pn, priv->ports[pn].sds_num);
348
349 if (of_get_phy_mode(dn, &interface))
350 interface = PHY_INTERFACE_MODE_NA;
351 if (interface == PHY_INTERFACE_MODE_HSGMII)
352 priv->ports[pn].is2G5 = true;
353 if (interface == PHY_INTERFACE_MODE_USXGMII)
354 priv->ports[pn].is2G5 = priv->ports[pn].is10G = true;
355 if (interface == PHY_INTERFACE_MODE_10GBASER)
356 priv->ports[pn].is10G = true;
357
358 if (of_property_read_u32(dn, "led-set", &led_set))
359 led_set = 0;
360 priv->ports[pn].led_set = led_set;
361
362 // Check for the integrated SerDes of the RTL8380M first
363 if (of_property_read_bool(phy_node, "phy-is-integrated")
364 && priv->id == 0x8380 && pn >= 24) {
365 pr_debug("----> FÓUND A SERDES\n");
366 priv->ports[pn].phy = PHY_RTL838X_SDS;
367 continue;
368 }
369
370 if (priv->id >= 0x9300) {
371 priv->ports[pn].phy_is_integrated = false;
372 if (of_property_read_bool(phy_node, "phy-is-integrated")) {
373 priv->ports[pn].phy_is_integrated = true;
374 priv->ports[pn].phy = PHY_RTL930X_SDS;
375 }
376 } else {
377 if (of_property_read_bool(phy_node, "phy-is-integrated") &&
378 !of_property_read_bool(phy_node, "sfp")) {
379 priv->ports[pn].phy = PHY_RTL8218B_INT;
380 continue;
381 }
382 }
383
384 if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
385 of_property_read_bool(phy_node, "sfp")) {
386 priv->ports[pn].phy = PHY_RTL8214FC;
387 continue;
388 }
389
390 if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
391 !of_property_read_bool(phy_node, "sfp")) {
392 priv->ports[pn].phy = PHY_RTL8218B_EXT;
393 continue;
394 }
395 }
396
397 /* Disable MAC polling the PHY so that we can start configuration */
398 priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl);
399
400 /* Enable PHY control via SoC */
401 if (priv->family_id == RTL8380_FAMILY_ID) {
402 /* Enable SerDes NWAY and PHY control via SoC */
403 sw_w32_mask(BIT(7), BIT(15), RTL838X_SMI_GLB_CTRL);
404 } else if (priv->family_id == RTL8390_FAMILY_ID) {
405 /* Disable PHY polling via SoC */
406 sw_w32_mask(BIT(7), 0, RTL839X_SMI_GLB_CTRL);
407 }
408
409 /* Power on fibre ports and reset them if necessary */
410 if (priv->ports[24].phy == PHY_RTL838X_SDS) {
411 pr_debug("Powering on fibre ports & reset\n");
412 rtl8380_sds_power(24, 1);
413 rtl8380_sds_power(26, 1);
414 }
415
416 pr_debug("%s done\n", __func__);
417
418 return 0;
419 }
420
421 static int __init rtl83xx_get_l2aging(struct rtl838x_switch_priv *priv)
422 {
423 int t = sw_r32(priv->r->l2_ctrl_1);
424
425 t &= priv->family_id == RTL8380_FAMILY_ID ? 0x7fffff : 0x1FFFFF;
426
427 if (priv->family_id == RTL8380_FAMILY_ID)
428 t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */
429 else
430 t = (t * 3) / 5;
431
432 pr_debug("L2 AGING time: %d sec\n", t);
433 pr_debug("Dynamic aging for ports: %x\n", sw_r32(priv->r->l2_port_aging_out));
434
435 return t;
436 }
437
438 /* Caller must hold priv->reg_mutex */
439 int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info)
440 {
441 struct rtl838x_switch_priv *priv = ds->priv;
442 int i;
443 u32 algomsk = 0;
444 u32 algoidx = 0;
445
446 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
447 pr_err("%s: Only mode LACP 802.3ad (4) allowed.\n", __func__);
448 return -EINVAL;
449 }
450
451 if (group >= priv->n_lags) {
452 pr_err("%s: LAG %d invalid.\n", __func__, group);
453 return -EINVAL;
454 }
455
456 if (port >= priv->cpu_port) {
457 pr_err("%s: Port %d invalid.\n", __func__, port);
458 return -EINVAL;
459 }
460
461 for (i = 0; i < priv->n_lags; i++) {
462 if (priv->lags_port_members[i] & BIT_ULL(port))
463 break;
464 }
465 if (i != priv->n_lags) {
466 pr_err("%s: Port %d already member of LAG %d.\n", __func__, port, i);
467 return -ENOSPC;
468 }
469
470 switch (info->hash_type) {
471 case NETDEV_LAG_HASH_L2:
472 algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
473 algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
474 break;
475 case NETDEV_LAG_HASH_L23:
476 algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
477 algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
478 algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
479 algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
480 algoidx = 1;
481 break;
482 case NETDEV_LAG_HASH_L34:
483 algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; //sport
484 algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; //dport
485 algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
486 algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
487 algoidx = 2;
488 break;
489 default:
490 algomsk |= 0x7f;
491 }
492 priv->r->set_distribution_algorithm(group, algoidx, algomsk);
493 priv->r->mask_port_reg_be(0, BIT_ULL(port), priv->r->trk_mbr_ctr(group));
494 priv->lags_port_members[group] |= BIT_ULL(port);
495
496 pr_info("%s: Added port %d to LAG %d. Members now %016llx.\n",
497 __func__, port, group, priv->lags_port_members[group]);
498
499 return 0;
500 }
501
502 /* Caller must hold priv->reg_mutex */
503 int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
504 {
505 struct rtl838x_switch_priv *priv = ds->priv;
506
507 if (group >= priv->n_lags) {
508 pr_err("%s: LAG %d invalid.\n", __func__, group);
509 return -EINVAL;
510 }
511
512 if (port >= priv->cpu_port) {
513 pr_err("%s: Port %d invalid.\n", __func__, port);
514 return -EINVAL;
515 }
516
517 if (!(priv->lags_port_members[group] & BIT_ULL(port))) {
518 pr_err("%s: Port %d not member of LAG %d.\n", __func__, port, group);
519 return -ENOSPC;
520 }
521
522 // 0x7f algo mask all
523 priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group));
524 priv->lags_port_members[group] &= ~BIT_ULL(port);
525
526 pr_info("%s: Removed port %d from LAG %d. Members now %016llx.\n",
527 __func__, port, group, priv->lags_port_members[group]);
528
529 return 0;
530 }
531
532 /* Allocate a 64 bit octet counter located in the LOG HW table */
533 static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv)
534 {
535 int idx;
536
537 mutex_lock(&priv->reg_mutex);
538
539 idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
540 if (idx >= priv->n_counters) {
541 mutex_unlock(&priv->reg_mutex);
542 return -1;
543 }
544
545 set_bit(idx, priv->octet_cntr_use_bm);
546 mutex_unlock(&priv->reg_mutex);
547
548 return idx;
549 }
550
551 /* Allocate a 32-bit packet counter
552  * Two 32-bit packet counters share the location of a 64-bit octet counter.
553  * Initially no packet counters are free; a pair becomes available by
554  * allocating the corresponding octet counter location.
555  */
556 int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv)
557 {
558 int idx, j;
559
560 mutex_lock(&priv->reg_mutex);
561
562 /* Because initially no packet counters are free, the logic is reversed:
563 * a 0-bit means the counter is already allocated (for octets)
564 */
565 idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2);
566 if (idx >= priv->n_counters * 2) {
567 j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
568 if (j >= priv->n_counters) {
569 mutex_unlock(&priv->reg_mutex);
570 return -1;
571 }
572 set_bit(j, priv->octet_cntr_use_bm);
573 idx = j * 2;
574 set_bit(j * 2 + 1, priv->packet_cntr_use_bm);
575
576 } else {
577 clear_bit(idx, priv->packet_cntr_use_bm);
578 }
579
580 mutex_unlock(&priv->reg_mutex);
581
582 return idx;
583 }
584
585 /* Add an L2 nexthop entry for the L3 routing system / PIE forwarding in the SoC
586 * Use VID and MAC in rtl838x_l2_entry to identify either a free slot in the L2 hash table
587  * or mark an existing entry as a nexthop by setting its nexthop bit
588 * Called from the L3 layer
589 * The index in the L2 hash table is filled into nh->l2_id;
590 */
591 int rtl83xx_l2_nexthop_add(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
592 {
593 struct rtl838x_l2_entry e;
594 u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid);
595 u32 key = priv->r->l2_hash_key(priv, seed);
596 int i, idx = -1;
597 u64 entry;
598
599 pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n",
600 __func__, nh->mac, nh->rvid, key, seed);
601
602 e.type = L2_UNICAST;
603 u64_to_ether_addr(nh->mac, &e.mac[0]);
604 e.port = nh->port;
605
606 // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs
607 for (i = 0; i < priv->l2_bucket_size; i++) {
608 entry = priv->r->read_l2_entry_using_hash(key, i, &e);
609
610 if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
611 idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1
612 : ((key << 2) | i) & 0xffff;
613 break;
614 }
615 }
616
617 if (idx < 0) {
618 pr_err("%s: No more L2 forwarding entries available\n", __func__);
619 return -1;
620 }
621
622 // Found an existing (e->valid is true) or empty entry, make it a nexthop entry
623 nh->l2_id = idx;
624 if (e.valid) {
625 nh->port = e.port;
626 nh->vid = e.vid; // Save VID
627 nh->rvid = e.rvid;
628 nh->dev_id = e.stack_dev;
629 // If the entry is already a valid next hop entry, don't change it
630 if (e.next_hop)
631 return 0;
632 } else {
633 e.valid = true;
634 e.is_static = true;
635 e.rvid = nh->rvid;
636 e.is_ip_mc = false;
637 e.is_ipv6_mc = false;
638 e.block_da = false;
639 e.block_sa = false;
640 e.suspended = false;
641 e.age = 0; // With port-ignore
642 e.port = priv->port_ignore;
643 u64_to_ether_addr(nh->mac, &e.mac[0]);
644 }
645 e.next_hop = true;
646 e.nh_route_id = nh->id; // NH route ID takes place of VID
647 e.nh_vlan_target = false;
648
649 priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
650
651 return 0;
652 }
653
654 /* Removes a Layer 2 next hop entry in the forwarding database
655 * If it was static, the entire entry is removed, otherwise the nexthop bit is cleared
656 * and we wait until the entry ages out
657 */
658 int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
659 {
660 struct rtl838x_l2_entry e;
661 u32 key = nh->l2_id >> 2;
662 int i = nh->l2_id & 0x3;
663 u64 entry = priv->r->read_l2_entry_using_hash(key, i, &e);
664
665 pr_debug("%s: id %d, key %d, index %d\n", __func__, nh->l2_id, key, i);
666 if (!e.valid) {
667 dev_err(priv->dev, "unknown nexthop, id %x\n", nh->l2_id);
668 return -1;
669 }
670
671 if (e.is_static)
672 e.valid = false;
673 e.next_hop = false;
674 e.vid = nh->vid; // Restore VID
675 e.rvid = nh->rvid;
676
677 priv->r->write_l2_entry_using_hash(key, i, &e);
678
679 return 0;
680 }
681
682 static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv,
683 struct net_device *ndev,
684 struct netdev_notifier_changeupper_info *info)
685 {
686 struct net_device *upper = info->upper_dev;
687 struct netdev_lag_upper_info *lag_upper_info = NULL;
688 int i, j, err;
689
690 if (!netif_is_lag_master(upper))
691 return 0;
692
693 mutex_lock(&priv->reg_mutex);
694
695 for (i = 0; i < priv->n_lags; i++) {
696 if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == upper))
697 break;
698 }
/* Neither a free LAG slot nor an existing binding for this upper device */
if (i >= priv->n_lags) {
err = -ENOSPC;
goto out;
}
699 for (j = 0; j < priv->cpu_port; j++) {
700 if (priv->ports[j].dp->slave == ndev)
701 break;
702 }
703 if (j >= priv->cpu_port) {
704 err = -EINVAL;
705 goto out;
706 }
707
708 if (info->linking) {
709 lag_upper_info = info->upper_info;
710 if (!priv->lag_devs[i])
711 priv->lag_devs[i] = upper;
712 err = rtl83xx_lag_add(priv->ds, i, priv->ports[j].dp->index, lag_upper_info);
713 if (err) {
714 err = -EINVAL;
715 goto out;
716 }
717 } else {
718 if (!priv->lag_devs[i]) {
719 err = -EINVAL;
goto out;
}
720 err = rtl83xx_lag_del(priv->ds, i, priv->ports[j].dp->index);
721 if (err) {
722 err = -EINVAL;
723 goto out;
724 }
725 if (!priv->lags_port_members[i])
726 priv->lag_devs[i] = NULL;
727 }
728
729 out:
730 mutex_unlock(&priv->reg_mutex);
731
732 return 0;
733 }
734
735 /* Is the lower network device a DSA slave network device of our RTL83xx switch?
736  * Unfortunately we cannot just follow dev->dsa_ptr as this is only set for the
737 * DSA master device.
738 */
739 int rtl83xx_port_is_under(const struct net_device *dev, struct rtl838x_switch_priv *priv)
740 {
741 int i;
742
743 // TODO: On 5.12:
744 // if(!dsa_slave_dev_check(dev)) {
745 // netdev_info(dev, "%s: not a DSA device.\n", __func__);
746 // return -EINVAL;
747 // }
748
749 for (i = 0; i < priv->cpu_port; i++) {
750 if (!priv->ports[i].dp)
751 continue;
752 if (priv->ports[i].dp->slave == dev)
753 return i;
754 }
755
756 return -EINVAL;
757 }
758
759 static int rtl83xx_netdevice_event(struct notifier_block *this,
760 unsigned long event, void *ptr)
761 {
762 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
763 struct rtl838x_switch_priv *priv;
764 int err = 0;
765
766 pr_debug("In: %s, event: %lu\n", __func__, event);
767
768 if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
769 return NOTIFY_DONE;
770
771 priv = container_of(this, struct rtl838x_switch_priv, nb);
772 switch (event) {
773 case NETDEV_CHANGEUPPER:
774 err = rtl83xx_handle_changeupper(priv, ndev, ptr);
775 break;
776 }
777
778 if (err)
779 return err;
780
781 return NOTIFY_DONE;
782 }
783
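/* Routes are kept in an rhltable keyed by the nexthop gateway IP (gw_ip), so
 * all routes sharing a gateway can be found with a single lookup
 */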
784 static const struct rhashtable_params route_ht_params = {
785 .key_len = sizeof(u32),
786 .key_offset = offsetof(struct rtl83xx_route, gw_ip),
787 .head_offset = offsetof(struct rtl83xx_route, linkage),
788 };
789
790 /* Updates an L3 next hop entry in the ROUTING table */
791 static int rtl83xx_l3_nexthop_update(struct rtl838x_switch_priv *priv, __be32 ip_addr, u64 mac)
792 {
793 struct rtl83xx_route *r;
794 struct rhlist_head *tmp, *list;
795
796 rcu_read_lock();
797 list = rhltable_lookup(&priv->routes, &ip_addr, route_ht_params);
798 if (!list) {
799 rcu_read_unlock();
800 return -ENOENT;
801 }
802
803 rhl_for_each_entry_rcu(r, tmp, list, linkage) {
804 pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n",
805 __func__, &ip_addr, mac);
806
807 // Reads the ROUTING table entry associated with the route
808 priv->r->route_read(r->id, r);
809 pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len);
810
811 r->nh.mac = r->nh.gw = mac;
812 r->nh.port = priv->port_ignore;
813 r->nh.id = r->id;
814
815 // Do we need to explicitly add a DMAC entry with the route's nh index?
816 if (priv->r->set_l3_egress_mac)
817 priv->r->set_l3_egress_mac(r->id, mac);
818
819 // Update ROUTING table: map gateway-mac and switch-mac id to route id
820 rtl83xx_l2_nexthop_add(priv, &r->nh);
821
822 r->attr.valid = true;
823 r->attr.action = ROUTE_ACT_FORWARD;
824 r->attr.type = 0;
825 r->attr.hit = false; // Reset route-used indicator
826
827 // Add PIE entry with dst_ip and prefix_len
828 r->pr.dip = r->dst_ip;
829 r->pr.dip_m = inet_make_mask(r->prefix_len);
830
831 if (r->is_host_route) {
832 int slot = priv->r->find_l3_slot(r, false);
833
834 pr_info("%s: Got slot for route: %d\n", __func__, slot);
835 priv->r->host_route_write(slot, r);
836 } else {
837 priv->r->route_write(r->id, r);
838 r->pr.fwd_sel = true;
839 r->pr.fwd_data = r->nh.l2_id;
840 r->pr.fwd_act = PIE_ACT_ROUTE_UC;
841 }
842
843 if (priv->r->set_l3_nexthop)
844 priv->r->set_l3_nexthop(r->nh.id, r->nh.l2_id, r->nh.if_id);
845
846 if (r->pr.id < 0) {
847 r->pr.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
848 if (r->pr.packet_cntr >= 0) {
849 pr_info("Using packet counter %d\n", r->pr.packet_cntr);
850 r->pr.log_sel = true;
851 r->pr.log_data = r->pr.packet_cntr;
852 }
853 priv->r->pie_rule_add(priv, &r->pr);
854 } else {
855 int pkts = priv->r->packet_cntr_read(r->pr.packet_cntr);
856 pr_info("%s: total packets: %d\n", __func__, pkts);
857
858 priv->r->pie_rule_write(priv, r->pr.id, &r->pr);
859 }
860 }
861 rcu_read_unlock();
862
863 return 0;
864 }
865
866 static int rtl83xx_port_ipv4_resolve(struct rtl838x_switch_priv *priv,
867 struct net_device *dev, __be32 ip_addr)
868 {
869 struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev);
870 int err = 0;
871 u64 mac;
872
873 if (!n) {
874 n = neigh_create(&arp_tbl, &ip_addr, dev);
875 if (IS_ERR(n))
876 return PTR_ERR(n);
877 }
878
879 /* If the neigh is already resolved, then go ahead and
880 * install the entry, otherwise start the ARP process to
881 * resolve the neigh.
882 */
883 if (n->nud_state & NUD_VALID) {
884 mac = ether_addr_to_u64(n->ha);
885 pr_info("%s: resolved mac: %016llx\n", __func__, mac);
886 rtl83xx_l3_nexthop_update(priv, ip_addr, mac);
887 } else {
888 pr_info("%s: need to wait\n", __func__);
889 neigh_event_send(n, NULL);
890 }
891
892 neigh_release(n);
893
894 return err;
895 }
896
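/* Context for netdev_walk_all_lower_dev(): records the switch port (if any)
 * found below an upper device such as a bridge or LAG
 */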
897 struct rtl83xx_walk_data {
898 struct rtl838x_switch_priv *priv;
899 int port;
900 };
901
902 static int rtl83xx_port_lower_walk(struct net_device *lower, struct netdev_nested_priv *_priv)
903 {
904 struct rtl83xx_walk_data *data = (struct rtl83xx_walk_data *)_priv->data;
905 struct rtl838x_switch_priv *priv = data->priv;
906 int ret = 0;
907 int index;
908
909 index = rtl83xx_port_is_under(lower, priv);
910 data->port = index;
911 if (index >= 0) {
912 pr_debug("Found DSA-port, index %d\n", index);
913 ret = 1;
914 }
915
916 return ret;
917 }
918
919 int rtl83xx_port_dev_lower_find(struct net_device *dev, struct rtl838x_switch_priv *priv)
920 {
921 struct rtl83xx_walk_data data;
922 struct netdev_nested_priv _priv;
923
924 data.priv = priv;
925 data.port = 0;
926 _priv.data = (void *)&data;
927
928 netdev_walk_all_lower_dev(dev, rtl83xx_port_lower_walk, &_priv);
929
930 return data.port;
931 }
932
933 static struct rtl83xx_route *rtl83xx_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
934 {
935 struct rtl83xx_route *r;
936 int idx = 0, err;
937
938 mutex_lock(&priv->reg_mutex);
939
940 idx = find_first_zero_bit(priv->route_use_bm, MAX_ROUTES);
941 pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
942
943 r = kzalloc(sizeof(*r), GFP_KERNEL);
944 if (!r) {
945 mutex_unlock(&priv->reg_mutex);
946 return r;
947 }
948
949 r->id = idx;
950 r->gw_ip = ip;
951 r->pr.id = -1; // We still need to allocate a rule in HW
952 r->is_host_route = false;
953
954 err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
955 if (err) {
956 pr_err("Could not insert new rule\n");
957 mutex_unlock(&priv->reg_mutex);
958 goto out_free;
959 }
960
961 set_bit(idx, priv->route_use_bm);
962
963 mutex_unlock(&priv->reg_mutex);
964
965 return r;
966
967 out_free:
968 kfree(r);
969
970 return NULL;
971 }
972
974 static struct rtl83xx_route *rtl83xx_host_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
975 {
976 struct rtl83xx_route *r;
977 int idx = 0, err;
978
979 mutex_lock(&priv->reg_mutex);
980
981 idx = find_first_zero_bit(priv->host_route_use_bm, MAX_HOST_ROUTES);
982 pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
983
984 r = kzalloc(sizeof(*r), GFP_KERNEL);
985 if (!r) {
986 mutex_unlock(&priv->reg_mutex);
987 return r;
988 }
989
990 /* We require a unique route ID irrespective of whether it is a prefix or host
991 * route (on RTL93xx) as we use this ID to associate a DMAC and next-hop entry
992 */
993 r->id = idx + MAX_ROUTES;
994
995 r->gw_ip = ip;
996 r->pr.id = -1; // We still need to allocate a rule in HW
997 r->is_host_route = true;
998
999 err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
1000 if (err) {
1001 pr_err("Could not insert new rule\n");
1002 mutex_unlock(&priv->reg_mutex);
1003 goto out_free;
1004 }
1005
1006 set_bit(idx, priv->host_route_use_bm);
1007
1008 mutex_unlock(&priv->reg_mutex);
1009
1010 return r;
1011
1012 out_free:
1013 kfree(r);
1014
1015 return NULL;
1016 }
1017
1020 static void rtl83xx_route_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_route *r)
1021 {
1022 int id;
1023
1024 if (rhltable_remove(&priv->routes, &r->linkage, route_ht_params))
1025 dev_warn(priv->dev, "Could not remove route\n");
1026
1027 if (r->is_host_route) {
1028 id = priv->r->find_l3_slot(r, false);
1029 pr_debug("%s: Got id for host route: %d\n", __func__, id);
1030 r->attr.valid = false;
1031 priv->r->host_route_write(id, r);
1032 clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm);
1033 } else {
1034 // If there is a HW representation of the route, delete it
1035 if (priv->r->route_lookup_hw) {
1036 id = priv->r->route_lookup_hw(r);
1037 pr_info("%s: Got id for prefix route: %d\n", __func__, id);
1038 r->attr.valid = false;
1039 priv->r->route_write(id, r);
1040 }
1041 clear_bit(r->id, priv->route_use_bm);
1042 }
1043
1044 kfree(r);
1045 }
1046
1047 static int rtl83xx_fib4_del(struct rtl838x_switch_priv *priv,
1048 struct fib_entry_notifier_info *info)
1049 {
1050 struct fib_nh *nh = fib_info_nh(info->fi, 0);
1051 struct rtl83xx_route *r;
1052 struct rhlist_head *tmp, *list;
1053
1054 pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
1055 rcu_read_lock();
1056 list = rhltable_lookup(&priv->routes, &nh->fib_nh_gw4, route_ht_params);
1057 if (!list) {
1058 rcu_read_unlock();
1059 pr_err("%s: no such gateway: %pI4\n", __func__, &nh->fib_nh_gw4);
1060 return -ENOENT;
1061 }
1062 rhl_for_each_entry_rcu(r, tmp, list, linkage) {
1063 if (r->dst_ip == info->dst && r->prefix_len == info->dst_len) {
1064 pr_info("%s: found a route with id %d, nh-id %d\n",
1065 __func__, r->id, r->nh.id);
1066 break;
1067 }
1068 }
1069 rcu_read_unlock();
1070
1071 rtl83xx_l2_nexthop_rm(priv, &r->nh);
1072
1073 pr_debug("%s: Releasing packet counter %d\n", __func__, r->pr.packet_cntr);
1074 set_bit(r->pr.packet_cntr, priv->packet_cntr_use_bm);
1075 priv->r->pie_rule_rm(priv, &r->pr);
1076
1077 rtl83xx_route_rm(priv, r);
1078
1079 nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
1080
1081 return 0;
1082 }
1083
1084 /* On the RTL93xx, an L3 termination endpoint MAC address on which the router waits
1085 * for packets to be routed needs to be allocated.
1086 */
1087 static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac)
1088 {
1089 int i, free_mac = -1;
1090 struct rtl93xx_rt_mac m;
1091
1092 mutex_lock(&priv->reg_mutex);
1093 for (i = 0; i < MAX_ROUTER_MACS; i++) {
1094 priv->r->get_l3_router_mac(i, &m);
1095 if (free_mac < 0 && !m.valid) {
1096 free_mac = i;
1097 continue;
1098 }
1099 if (m.valid && m.mac == mac) {
1100 free_mac = i;
1101 break;
1102 }
1103 }
1104
1105 if (free_mac < 0) {
1106 pr_err("No free router MACs, cannot offload\n");
1107 mutex_unlock(&priv->reg_mutex);
1108 return -1;
1109 }
1110
1111 m.valid = true;
1112 m.mac = mac;
1113 m.p_type = 0; // An individual port, not a trunk port
1114 m.p_id = 0x3f; // Listen on any port
1115 m.p_id_mask = 0;
1116 m.vid = 0; // Listen on any VLAN...
1117 m.vid_mask = 0; // ... so mask needs to be 0
1118 m.mac_mask = 0xffffffffffffULL; // We want an exact match of the interface MAC
1119 m.action = L3_FORWARD; // Route the packet
1120 priv->r->set_l3_router_mac(free_mac, &m);
1121
1122 mutex_unlock(&priv->reg_mutex);
1123
1124 return 0;
1125 }
1126
1127 static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan)
1128 {
1129 int i, free_mac = -1;
1130 struct rtl838x_l3_intf intf;
1131 u64 m;
1132
1133 mutex_lock(&priv->reg_mutex);
1134 for (i = 0; i < MAX_SMACS; i++) {
1135 m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i);
1136 if (free_mac < 0 && !m) {
1137 free_mac = i;
1138 continue;
1139 }
1140 if (m == mac) {
1141 mutex_unlock(&priv->reg_mutex);
1142 return i;
1143 }
1144 }
1145
1146 if (free_mac < 0) {
1147 pr_err("No free egress interface, cannot offload\n");
mutex_unlock(&priv->reg_mutex);
1148 return -1;
1149 }
1150
1151 // Set up default egress interface 1
1152 intf.vid = vlan;
1153 intf.smac_idx = free_mac;
1154 intf.ip4_mtu_id = 1;
1155 intf.ip6_mtu_id = 1;
1156 intf.ttl_scope = 1; // TTL
1157 intf.hl_scope = 1; // Hop Limit
1158 intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; // FORWARD
1159 intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; // FORWARD;
1160 priv->r->set_l3_egress_intf(free_mac, &intf);
1161
1162 priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac);
1163
1164 mutex_unlock(&priv->reg_mutex);
1165
1166 return free_mac;
1167 }
1168
1169 static int rtl83xx_fib4_add(struct rtl838x_switch_priv *priv,
1170 struct fib_entry_notifier_info *info)
1171 {
1172 struct fib_nh *nh = fib_info_nh(info->fi, 0);
1173 struct net_device *dev = fib_info_nh(info->fi, 0)->fib_nh_dev;
1174 int port;
1175 struct rtl83xx_route *r;
1176 bool to_localhost;
1177 int vlan = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0;
1178
1179 pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
1180 if (!info->dst) {
1181 pr_info("Not offloading default route for now\n");
1182 return 0;
1183 }
1184
1185 pr_debug("GW: %pI4, interface name %s, mac %016llx, vlan %d\n", &nh->fib_nh_gw4, dev->name,
1186 ether_addr_to_u64(dev->dev_addr), vlan
1187 );
1188
1189 port = rtl83xx_port_dev_lower_find(dev, priv);
1190 if (port < 0)
1191 return -1;
1192
1193 // For now we only work with routes that have a gateway and are not ourself
1194 // if ((!nh->fib_nh_gw4) && (info->dst_len != 32))
1195 // return 0;
1196
// Do not offload routes whose destination ends in .255
1197 if ((info->dst & 0xff) == 0xff)
1198 return 0;
1199
1200 // Do not offload routes to 192.168.100.x
1201 if ((info->dst & 0xffffff00) == 0xc0a86400)
1202 return 0;
1203
1204 // Do not offload routes to 127.x.x.x
1205 if ((info->dst & 0xff000000) == 0x7f000000)
1206 return 0;
1207
1208 // Allocate a route or host-route entry (if the hardware supports host routes)
1209 if (info->dst_len == 32 && priv->r->host_route_write)
1210 r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4);
1211 else
1212 r = rtl83xx_route_alloc(priv, nh->fib_nh_gw4);
1213
1214 if (!r) {
1215 pr_err("%s: No more free route entries\n", __func__);
1216 return -1;
1217 }
1218
1219 r->dst_ip = info->dst;
1220 r->prefix_len = info->dst_len;
1221 r->nh.rvid = vlan;
1222 to_localhost = !nh->fib_nh_gw4;
1223
1224 if (priv->r->set_l3_router_mac) {
1225 u64 mac = ether_addr_to_u64(dev->dev_addr);
1226
1227 pr_debug("Local route and router mac %016llx\n", mac);
1228
1229 if (rtl83xx_alloc_router_mac(priv, mac))
1230 goto out_free_rt;
1231
1232 // vid = 0: Do not care about VID
1233 r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan);
1234 if (r->nh.if_id < 0)
1235 goto out_free_rmac;
1236
1237 if (to_localhost) {
1238 int slot;
1239
1240 r->nh.mac = mac;
1241 r->nh.port = priv->port_ignore;
1242 r->attr.valid = true;
1243 r->attr.action = ROUTE_ACT_TRAP2CPU;
1244 r->attr.type = 0;
1245
1246 slot = priv->r->find_l3_slot(r, false);
1247 pr_debug("%s: Got slot for route: %d\n", __func__, slot);
1248 priv->r->host_route_write(slot, r);
1249 }
1250 }
1251
1252 // We need to resolve the mac address of the GW
1253 if (!to_localhost)
1254 rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4);
1255
1256 nh->fib_nh_flags |= RTNH_F_OFFLOAD;
1257
1258 return 0;
1259
1260 out_free_rmac:
1261 out_free_rt:
/* TODO: release the router MAC / route entry allocated above */
1262 return 0;
1263 }
1264
1265 static int rtl83xx_fib6_add(struct rtl838x_switch_priv *priv,
1266 struct fib6_entry_notifier_info *info)
1267 {
1268 pr_debug("In %s\n", __func__);
1269 // nh->fib_nh_flags |= RTNH_F_OFFLOAD;
1270
1271 return 0;
1272 }
1273
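/* Netevent (neighbour) notifications arrive in atomic context, so the nexthop
 * update is deferred to process context via this work item
 */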
1274 struct net_event_work {
1275 struct work_struct work;
1276 struct rtl838x_switch_priv *priv;
1277 u64 mac;
1278 u32 gw_addr;
1279 };
1280
1281 static void rtl83xx_net_event_work_do(struct work_struct *work)
1282 {
1283 struct net_event_work *net_work =
1284 container_of(work, struct net_event_work, work);
1285 struct rtl838x_switch_priv *priv = net_work->priv;
1286
1287 rtl83xx_l3_nexthop_update(priv, net_work->gw_addr, net_work->mac);
1288 }
1289
1290 static int rtl83xx_netevent_event(struct notifier_block *this,
1291 unsigned long event, void *ptr)
1292 {
1293 struct rtl838x_switch_priv *priv;
1294 struct net_device *dev;
1295 struct neighbour *n = ptr;
1296 int port;
1297 struct net_event_work *net_work;
1298
1299 priv = container_of(this, struct rtl838x_switch_priv, ne_nb);
1300
1301 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
1302 if (!net_work)
1303 return NOTIFY_BAD;
1304
1305 INIT_WORK(&net_work->work, rtl83xx_net_event_work_do);
1306 net_work->priv = priv;
1307
1308 switch (event) {
1309 case NETEVENT_NEIGH_UPDATE:
1310 if (n->tbl != &arp_tbl) {
kfree(net_work);
1311 return NOTIFY_DONE;
}
1312 dev = n->dev;
1313 port = rtl83xx_port_dev_lower_find(dev, priv);
1314 if (port < 0 || !(n->nud_state & NUD_VALID)) {
1315 pr_debug("%s: Neigbour invalid, not updating\n", __func__);
1316 kfree(net_work);
1317 return NOTIFY_DONE;
1318 }
1319
1320 net_work->mac = ether_addr_to_u64(n->ha);
1321 net_work->gw_addr = *(__be32 *) n->primary_key;
1322
1323 pr_debug("%s: updating neighbour on port %d, mac %016llx\n",
1324 __func__, port, net_work->mac);
1325 schedule_work(&net_work->work);
1328 break;
default:
kfree(net_work);
break;
1329 }
1330
1331 return NOTIFY_DONE;
1332 }
1333
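/* FIB notifications are likewise deferred to a work item; the notifier copies
 * the event info and takes a reference (fib_info_hold()/fib_rule_get()) so it
 * remains valid until the work has run
 */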
1334 struct rtl83xx_fib_event_work {
1335 struct work_struct work;
1336 union {
1337 struct fib_entry_notifier_info fen_info;
1338 struct fib6_entry_notifier_info fen6_info;
1339 struct fib_rule_notifier_info fr_info;
1340 };
1341 struct rtl838x_switch_priv *priv;
1342 bool is_fib6;
1343 unsigned long event;
1344 };
1345
1346 static void rtl83xx_fib_event_work_do(struct work_struct *work)
1347 {
1348 struct rtl83xx_fib_event_work *fib_work =
1349 container_of(work, struct rtl83xx_fib_event_work, work);
1350 struct rtl838x_switch_priv *priv = fib_work->priv;
1351 struct fib_rule *rule;
1352 int err;
1353
1354 /* Protect internal structures from changes */
1355 rtnl_lock();
1356 pr_debug("%s: doing work, event %ld\n", __func__, fib_work->event);
1357 switch (fib_work->event) {
1358 case FIB_EVENT_ENTRY_ADD:
1359 case FIB_EVENT_ENTRY_REPLACE:
1360 case FIB_EVENT_ENTRY_APPEND:
1361 if (fib_work->is_fib6) {
1362 err = rtl83xx_fib6_add(priv, &fib_work->fen6_info);
1363 } else {
1364 err = rtl83xx_fib4_add(priv, &fib_work->fen_info);
1365 fib_info_put(fib_work->fen_info.fi);
1366 }
1367 if (err)
1368 pr_err("%s: FIB4 failed\n", __func__);
1369 break;
1370 case FIB_EVENT_ENTRY_DEL:
1371 rtl83xx_fib4_del(priv, &fib_work->fen_info);
1372 fib_info_put(fib_work->fen_info.fi);
1373 break;
1374 case FIB_EVENT_RULE_ADD:
1375 case FIB_EVENT_RULE_DEL:
1376 rule = fib_work->fr_info.rule;
1377 if (!fib4_rule_default(rule))
1378 pr_err("%s: FIB4 default rule failed\n", __func__);
1379 fib_rule_put(rule);
1380 break;
1381 }
1382 rtnl_unlock();
1383 kfree(fib_work);
1384 }
1385
1386 /* Called with rcu_read_lock() */
1387 static int rtl83xx_fib_event(struct notifier_block *this, unsigned long event, void *ptr)
1388 {
1389 struct fib_notifier_info *info = ptr;
1390 struct rtl838x_switch_priv *priv;
1391 struct rtl83xx_fib_event_work *fib_work;
1392
1393 if ((info->family != AF_INET && info->family != AF_INET6 &&
1394 info->family != RTNL_FAMILY_IPMR &&
1395 info->family != RTNL_FAMILY_IP6MR))
1396 return NOTIFY_DONE;
1397
1398 priv = container_of(this, struct rtl838x_switch_priv, fib_nb);
1399
1400 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
1401 if (!fib_work)
1402 return NOTIFY_BAD;
1403
1404 INIT_WORK(&fib_work->work, rtl83xx_fib_event_work_do);
1405 fib_work->priv = priv;
1406 fib_work->event = event;
1407 fib_work->is_fib6 = false;
1408
1409 switch (event) {
1410 case FIB_EVENT_ENTRY_ADD:
1411 case FIB_EVENT_ENTRY_REPLACE:
1412 case FIB_EVENT_ENTRY_APPEND:
1413 case FIB_EVENT_ENTRY_DEL:
1414 pr_debug("%s: FIB_ENTRY ADD/DEL, event %ld\n", __func__, event);
1415 if (info->family == AF_INET) {
1416 struct fib_entry_notifier_info *fen_info = ptr;
1417
1418 if (fen_info->fi->fib_nh_is_v6) {
1419 NL_SET_ERR_MSG_MOD(info->extack,
1420 "IPv6 gateway with IPv4 route is not supported");
1421 kfree(fib_work);
1422 return notifier_from_errno(-EINVAL);
1423 }
1424
1425 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
1426 /* Take a reference on fib_info to prevent it from being
1427 * freed while work is queued. Release it afterwards.
1428 */
1429 fib_info_hold(fib_work->fen_info.fi);
1430
1431 } else if (info->family == AF_INET6) {
1433 pr_warn("%s: FIB_ENTRY ADD/DEL for IPv6 not supported\n", __func__);
1434 kfree(fib_work);
1435 return NOTIFY_DONE;
1436 }
1437 break;
1438
1439 case FIB_EVENT_RULE_ADD:
1440 case FIB_EVENT_RULE_DEL:
1441 pr_debug("%s: FIB_RULE ADD/DEL, event: %ld\n", __func__, event);
1442 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
1443 fib_rule_get(fib_work->fr_info.rule);
1444 break;
1445 }
1446
1447 schedule_work(&fib_work->work);
1448
1449 return NOTIFY_DONE;
1450 }
1451
1452 static int __init rtl83xx_sw_probe(struct platform_device *pdev)
1453 {
1454 int err = 0, i;
1455 struct rtl838x_switch_priv *priv;
1456 struct device *dev = &pdev->dev;
1457 u64 bpdu_mask;
1458
1459 pr_debug("Probing RTL838X switch device\n");
1460 if (!pdev->dev.of_node) {
1461 dev_err(dev, "No DT found\n");
1462 return -EINVAL;
1463 }
1464
1465 // Initialize access to RTL switch tables
1466 rtl_table_init();
1467
1468 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1469 if (!priv)
1470 return -ENOMEM;
1471
1472 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
1474 if (!priv->ds)
1475 return -ENOMEM;
1476 priv->ds->dev = dev;
1477 priv->ds->priv = priv;
1478 priv->ds->ops = &rtl83xx_switch_ops;
1479 priv->ds->needs_standalone_vlan_filtering = true;
1480 priv->dev = dev;
1481
1482 mutex_init(&priv->reg_mutex);
1483
1484 priv->family_id = soc_info.family;
1485 priv->id = soc_info.id;
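/* Fill in the per-family parameters: CPU port, register set, port field
 * width, number of LAGs, counters, etc.
 */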
1486 switch (soc_info.family) {
1487 case RTL8380_FAMILY_ID:
1488 priv->ds->ops = &rtl83xx_switch_ops;
1489 priv->cpu_port = RTL838X_CPU_PORT;
1490 priv->port_mask = 0x1f;
1491 priv->port_width = 1;
1492 priv->irq_mask = 0x0FFFFFFF;
1493 priv->r = &rtl838x_reg;
1494 priv->ds->num_ports = 29;
1495 priv->fib_entries = 8192;
1496 rtl8380_get_version(priv);
1497 priv->n_lags = 8;
1498 priv->l2_bucket_size = 4;
1499 priv->n_pie_blocks = 12;
1500 priv->port_ignore = 0x1f;
1501 priv->n_counters = 128;
1502 break;
1503 case RTL8390_FAMILY_ID:
1504 priv->ds->ops = &rtl83xx_switch_ops;
1505 priv->cpu_port = RTL839X_CPU_PORT;
1506 priv->port_mask = 0x3f;
1507 priv->port_width = 2;
1508 priv->irq_mask = 0xFFFFFFFFFFFFFULL;
1509 priv->r = &rtl839x_reg;
1510 priv->ds->num_ports = 53;
1511 priv->fib_entries = 16384;
1512 rtl8390_get_version(priv);
1513 priv->n_lags = 16;
1514 priv->l2_bucket_size = 4;
1515 priv->n_pie_blocks = 18;
1516 priv->port_ignore = 0x3f;
1517 priv->n_counters = 1024;
1518 break;
1519 case RTL9300_FAMILY_ID:
1520 priv->ds->ops = &rtl930x_switch_ops;
1521 priv->cpu_port = RTL930X_CPU_PORT;
1522 priv->port_mask = 0x1f;
1523 priv->port_width = 1;
1524 priv->irq_mask = 0x0FFFFFFF;
1525 priv->r = &rtl930x_reg;
1526 priv->ds->num_ports = 29;
1527 priv->fib_entries = 16384;
1528 priv->version = RTL8390_VERSION_A;
1529 priv->n_lags = 16;
1530 sw_w32(1, RTL930X_ST_CTRL);
1531 priv->l2_bucket_size = 8;
1532 priv->n_pie_blocks = 16;
1533 priv->port_ignore = 0x3f;
1534 priv->n_counters = 2048;
1535 break;
1536 case RTL9310_FAMILY_ID:
1537 priv->ds->ops = &rtl930x_switch_ops;
1538 priv->cpu_port = RTL931X_CPU_PORT;
1539 priv->port_mask = 0x3f;
1540 priv->port_width = 2;
1541 priv->irq_mask = 0xFFFFFFFFFFFFFULL;
1542 priv->r = &rtl931x_reg;
1543 priv->ds->num_ports = 57;
1544 priv->fib_entries = 16384;
1545 priv->version = RTL8390_VERSION_A;
1546 priv->n_lags = 16;
1547 priv->l2_bucket_size = 8;
1548 break;
1549 }
1550 pr_debug("Chip version %c\n", priv->version);
1551
1552 err = rtl83xx_mdio_probe(priv);
1553 if (err) {
1554 /* Probing fails the 1st time because of missing ethernet driver
1555  * initialization. Use this to disable traffic in case the bootloader left it on
1556  */
1557 return err;
1558 }
1559
1560 err = dsa_register_switch(priv->ds);
1561 if (err) {
1562 dev_err(dev, "Error registering switch: %d\n", err);
1563 return err;
1564 }
1565
1566 /* dsa_to_port returns dsa_port from the port list in
1567 * dsa_switch_tree, the tree is built when the switch
1568 * is registered by dsa_register_switch
1569 */
1570 for (i = 0; i <= priv->cpu_port; i++)
1571 priv->ports[i].dp = dsa_to_port(priv->ds, i);
1572
1573 /* Enable link and media change interrupts. Are the SERDES masks needed? */
1574 sw_w32_mask(0, 3, priv->r->isr_glb_src);
1575
1576 priv->r->set_port_reg_le(priv->irq_mask, priv->r->isr_port_link_sts_chg);
1577 priv->r->set_port_reg_le(priv->irq_mask, priv->r->imr_port_link_sts_chg);
1578
1579 priv->link_state_irq = platform_get_irq(pdev, 0);
1580 pr_info("LINK state irq: %d\n", priv->link_state_irq);
1581 switch (priv->family_id) {
1582 case RTL8380_FAMILY_ID:
1583 err = request_irq(priv->link_state_irq, rtl838x_switch_irq,
1584 IRQF_SHARED, "rtl838x-link-state", priv->ds);
1585 break;
1586 case RTL8390_FAMILY_ID:
1587 err = request_irq(priv->link_state_irq, rtl839x_switch_irq,
1588 IRQF_SHARED, "rtl839x-link-state", priv->ds);
1589 break;
1590 case RTL9300_FAMILY_ID:
1591 err = request_irq(priv->link_state_irq, rtl930x_switch_irq,
1592 IRQF_SHARED, "rtl930x-link-state", priv->ds);
1593 break;
1594 case RTL9310_FAMILY_ID:
1595 err = request_irq(priv->link_state_irq, rtl931x_switch_irq,
1596 IRQF_SHARED, "rtl931x-link-state", priv->ds);
1597 break;
1598 }
1599 if (err) {
1600 dev_err(dev, "Error setting up switch interrupt.\n");
1601 /* Need to free allocated switch here */
1602 }
1603
1604 /* Enable interrupts for switch, on RTL931x, the IRQ is always on globally */
1605 if (soc_info.family != RTL9310_FAMILY_ID)
1606 sw_w32(0x1, priv->r->imr_glb);
1607
1608 rtl83xx_get_l2aging(priv);
1609
1610 rtl83xx_setup_qos(priv);
1611
1612 priv->r->l3_setup(priv);
1613
1614 /* Clear all destination ports for mirror groups */
1615 for (i = 0; i < 4; i++)
1616 priv->mirror_group_ports[i] = -1;
1617
1618 /* Register netdevice event callback to catch changes in link aggregation groups */
1619 priv->nb.notifier_call = rtl83xx_netdevice_event;
1620 err = register_netdevice_notifier(&priv->nb);
if (err) {
1621 priv->nb.notifier_call = NULL;
1622 dev_err(dev, "Failed to register LAG netdev notifier\n");
1623 goto err_register_nb;
1624 }
1625
1626 // Initialize hash table for L3 routing
1627 rhltable_init(&priv->routes, &route_ht_params);
1628
1629 /* Register netevent notifier callback to catch notifications about neighboring
1630 * changes to update nexthop entries for L3 routing.
1631 */
1632 priv->ne_nb.notifier_call = rtl83xx_netevent_event;
1633 err = register_netevent_notifier(&priv->ne_nb);
if (err) {
1634 priv->ne_nb.notifier_call = NULL;
1635 dev_err(dev, "Failed to register netevent notifier\n");
1636 goto err_register_ne_nb;
1637 }
1638
1639 priv->fib_nb.notifier_call = rtl83xx_fib_event;
1640
1641 /* Register Forwarding Information Base notifier to offload routes where
1642  * possible.
1643 * Only FIBs pointing to our own netdevs are programmed into
1644 * the device, so no need to pass a callback.
1645 */
1646 err = register_fib_notifier(&init_net, &priv->fib_nb, NULL, NULL);
1647 if (err)
1648 goto err_register_fib_nb;
1649
1650 // TODO: put this into l2_setup()
1651 // Flood BPDUs to all ports including cpu-port
1652 if (soc_info.family != RTL9300_FAMILY_ID) {
1653 bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF;
1654 priv->r->set_port_reg_be(bpdu_mask, priv->r->rma_bpdu_fld_pmask);
1655
1656 // TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs
1657 sw_w32(7, priv->r->spcl_trap_eapol_ctrl);
1658
1659 rtl838x_dbgfs_init(priv);
1660 } else {
1661 rtl930x_dbgfs_init(priv);
1662 }
1663
1664 return 0;
1665
1666 err_register_fib_nb:
1667 unregister_netevent_notifier(&priv->ne_nb);
1668 err_register_ne_nb:
1669 unregister_netdevice_notifier(&priv->nb);
1670 err_register_nb:
1671 return err;
1672 }
1673
1674 static int rtl83xx_sw_remove(struct platform_device *pdev)
1675 {
1676 // TODO:
1677 pr_debug("Removing platform driver for rtl83xx-sw\n");
1678
1679 return 0;
1680 }
1681
1682 static const struct of_device_id rtl83xx_switch_of_ids[] = {
1683 { .compatible = "realtek,rtl83xx-switch" },
1684 { /* sentinel */ }
1685 };
1686
1688 MODULE_DEVICE_TABLE(of, rtl83xx_switch_of_ids);
1689
1690 static struct platform_driver rtl83xx_switch_driver = {
1691 .probe = rtl83xx_sw_probe,
1692 .remove = rtl83xx_sw_remove,
1693 .driver = {
1694 .name = "rtl83xx-switch",
1695 .pm = NULL,
1696 .of_match_table = rtl83xx_switch_of_ids,
1697 },
1698 };
1699
1700 module_platform_driver(rtl83xx_switch_driver);
1701
1702 MODULE_AUTHOR("B. Koblitz");
1703 MODULE_DESCRIPTION("RTL83XX SoC Switch Driver");
1704 MODULE_LICENSE("GPL");