realtek: Reduce variable scopes
[openwrt/staging/nbd.git] / target / linux / realtek / files-5.15 / drivers / net / ethernet / rtl838x_eth.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* linux/drivers/net/ethernet/rtl838x_eth.c
3 * Copyright (C) 2020 B. Koblitz
4 */
5
6 #include <linux/dma-mapping.h>
7 #include <linux/etherdevice.h>
8 #include <linux/interrupt.h>
9 #include <linux/io.h>
10 #include <linux/platform_device.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 #include <linux/of.h>
14 #include <linux/of_net.h>
15 #include <linux/of_mdio.h>
16 #include <linux/module.h>
17 #include <linux/phylink.h>
18 #include <linux/pkt_sched.h>
19 #include <net/dsa.h>
20 #include <net/switchdev.h>
21 #include <asm/cacheflush.h>
22
23 #include <asm/mach-rtl838x/mach-rtl83xx.h>
24 #include "rtl838x_eth.h"
25
26 extern struct rtl83xx_soc_info soc_info;
27
28 /* Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
29 * The ring is assigned by switch based on packet/port priortity
30 * Maximum number of TX rings is 2, Ring 2 being the high priority
31 * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
32 * for an RX ring, MAX_ENTRIES the maximum number of entries
33 * available in total for all queues.
34 */
35 #define MAX_RXRINGS 32
36 #define MAX_RXLEN 300
37 #define MAX_ENTRIES (300 * 8)
38 #define TXRINGS 2
39 #define TXRINGLEN 160
40 #define NOTIFY_EVENTS 10
41 #define NOTIFY_BLOCKS 10
42 #define TX_EN 0x8
43 #define RX_EN 0x4
44 #define TX_EN_93XX 0x20
45 #define RX_EN_93XX 0x10
46 #define TX_DO 0x2
47 #define WRAP 0x2
48 #define MAX_PORTS 57
49 #define MAX_SMI_BUSSES 4
50
51 #define RING_BUFFER 1600
52
/* DMA packet descriptor header shared between driver and switch ASIC.
 * buf points at the packet payload buffer; the trailing cpu_tag words
 * carry the per-SoC CPU tag (destination port mask, priority, trap
 * reason, ...) written/read by the create_tx_header/decode_tag helpers.
 */
struct p_hdr {
	uint8_t *buf;
	uint16_t reserved;
	uint16_t size; /* buffer size */
	uint16_t offset;
	uint16_t len; /* pkt len */
	/* cpu_tag[0] is a reserved uint16_t on RTL83xx */
	uint16_t cpu_tag[10];
} __packed __aligned(1);
62
/* One L2 notification event as laid out by the RTL839x hardware:
 * type (add/del), FID/VID, 48-bit MAC and source logical port.
 * Only entries with valid set carry data.
 */
struct n_event {
	uint32_t type:2;
	uint32_t fidVid:12;
	uint64_t mac:48;
	uint32_t slp:6;
	uint32_t valid:1;
	uint32_t reserved:27;
} __packed __aligned(1);
71
/* All RX/TX ring state kept in one DMA-coherent allocation (priv->membase):
 * descriptor word arrays (rx_r/tx_r), packet headers, the driver's current
 * ring positions (c_rx/c_tx) and the TX packet buffer space. RX buffer
 * space is allocated separately and referenced via rx_space.
 */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];
	uint32_t tx_r[TXRINGS][TXRINGLEN];
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];
	uint32_t c_rx[MAX_RXRINGS];
	uint32_t c_tx[TXRINGS];
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];
	uint8_t *rx_space;
};
82
/* One hardware notification block holding up to NOTIFY_EVENTS L2 events */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};
86
/* RTL839x L2 notification ring: blocks plus the descriptor ring the
 * ASIC walks (bit 0 of a ring entry = owned by switch).
 */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];
	u32 reserved2[8];
};
93
94 static void rtl838x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
95 {
96 /* cpu_tag[0] is reserved on the RTL83XX SoCs */
97 h->cpu_tag[1] = 0x0400; /* BIT 10: RTL8380_CPU_TAG */
98 h->cpu_tag[2] = 0x0200; /* Set only AS_DPM, to enable DPM settings below */
99 h->cpu_tag[3] = 0x0000;
100 h->cpu_tag[4] = BIT(dest_port) >> 16;
101 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
102
103 /* Set internal priority (PRI) and enable (AS_PRI) */
104 if (prio >= 0)
105 h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 12;
106 }
107
108 static void rtl839x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
109 {
110 /* cpu_tag[0] is reserved on the RTL83XX SoCs */
111 h->cpu_tag[1] = 0x0100; /* RTL8390_CPU_TAG marker */
112 h->cpu_tag[2] = BIT(4); /* AS_DPM flag */
113 h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
114 /* h->cpu_tag[1] |= BIT(1) | BIT(0); */ /* Bypass filter 1/2 */
115 if (dest_port >= 32) {
116 dest_port -= 32;
117 h->cpu_tag[2] |= (BIT(dest_port) >> 16) & 0xf;
118 h->cpu_tag[3] = BIT(dest_port) & 0xffff;
119 } else {
120 h->cpu_tag[4] = BIT(dest_port) >> 16;
121 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
122 }
123
124 /* Set internal priority (PRI) and enable (AS_PRI) */
125 if (prio >= 0)
126 h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 8;
127 }
128
129 static void rtl930x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
130 {
131 h->cpu_tag[0] = 0x8000; /* CPU tag marker */
132 h->cpu_tag[1] = h->cpu_tag[2] = 0;
133 h->cpu_tag[3] = 0;
134 h->cpu_tag[4] = 0;
135 h->cpu_tag[5] = 0;
136 h->cpu_tag[6] = BIT(dest_port) >> 16;
137 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
138
139 /* Enable (AS_QID) and set priority queue (QID) */
140 if (prio >= 0)
141 h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
142 }
143
144 static void rtl931x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
145 {
146 h->cpu_tag[0] = 0x8000; /* CPU tag marker */
147 h->cpu_tag[1] = h->cpu_tag[2] = 0;
148 h->cpu_tag[3] = 0;
149 h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
150 if (dest_port >= 32) {
151 dest_port -= 32;
152 h->cpu_tag[4] = BIT(dest_port) >> 16;
153 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
154 } else {
155 h->cpu_tag[6] = BIT(dest_port) >> 16;
156 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
157 }
158
159 /* Enable (AS_QID) and set priority queue (QID) */
160 if (prio >= 0)
161 h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
162 }
163
164 static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
165 {
166 h->cpu_tag[2] |= BIT(4); /* Enable VLAN forwarding offload */
167 h->cpu_tag[2] |= (vlan >> 8) & 0xf;
168 h->cpu_tag[3] |= (vlan & 0xff) << 8;
169 }
170
/* Per-RX-ring NAPI context: ring index, back-pointer to the driver
 * private data, and the NAPI instance scheduled from the IRQ handlers.
 */
struct rtl838x_rx_q {
	int id;
	struct rtl838x_eth_priv *priv;
	struct napi_struct napi;
};
176
/* Driver private state attached to the net_device.
 * membase is the DMA memory holding struct ring_b (and, on RTL839x, the
 * struct notify_b that follows it). r points at the per-family register
 * and accessor table selected at probe time.
 */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;          /* DMA ring memory, see struct ring_b */
	spinlock_t lock;
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;          /* RTL8380/8390/9300/9310_FAMILY_ID */
	const struct rtl838x_eth_reg *r;
	u8 cpu_port;
	u32 lastEvent;          /* next L2 notification ring entry to check */
	u16 rxrings;            /* number of RX rings actually used */
	u16 rxringlen;          /* entries per RX ring */
	u8 smi_bus[MAX_PORTS];
	u8 smi_addr[MAX_PORTS];
	u32 sds_id[MAX_PORTS];
	bool smi_bus_isc45[MAX_SMI_BUSSES];
	bool phy_is_internal[MAX_PORTS];
	phy_interface_t interfaces[MAX_PORTS];
};
200
201 extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
202 extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
203 extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
204 extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
205 extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
206 extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
207 extern int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg);
208 extern int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
209 extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
210 extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
211 extern int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
212 extern int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
213
214 /* On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of
215 * the rings. Writing x into these registers substracts x from its content.
216 * When the content reaches the ring size, the ASIC no longer adds
217 * packets to this receive queue.
218 */
/* No-op: RTL838x has no RX ring fill-level counter to update */
void rtl838x_update_cntr(int r, int released)
{
	/* This feature is not available on RTL838x SoCs */
}
223
/* No-op: RTL839x has no RX ring fill-level counter to update */
void rtl839x_update_cntr(int r, int released)
{
	/* This feature is not available on RTL839x SoCs */
}
228
/* Credit 'released' processed packets back to RX ring r's fill-level
 * counter. Counters are 10 bits wide, packed 3 per 32-bit register;
 * writing a value subtracts it from the counter (see comment above).
 */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10; /* bit offset of ring r's counter within the register */
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff; /* current fill level before the update */
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): this second write of the old level looks redundant after
	 * the masked write above, and v is not shifted back to 'pos' — verify the
	 * intended subtract semantics against the SoC documentation.
	 */
	sw_w32(v, reg);
}
240
/* RTL931x variant of rtl930x_update_cntr: credit 'released' packets back
 * to ring r's 10-bit fill-level counter (3 counters per register).
 */
void rtl931x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10; /* bit offset of ring r's counter within the register */
	u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff; /* current fill level before the update */
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): as in rtl930x_update_cntr, the unshifted write-back of the
	 * old level looks suspicious — confirm against the SoC documentation.
	 */
	sw_w32(v, reg);
}
251
/* Decoded CPU tag of a received packet, filled in by the per-family
 * decode_tag helpers: trap reason, RX queue, source port, whether the
 * packet was already L2-forwarded in hardware, and CRC status.
 */
struct dsa_tag {
	u8 reason;
	u8 queue;
	u16 port;
	u8 l2_offloaded;
	u8 prio;
	bool crc_error;
};
260
261 bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
262 {
263 /* cpu_tag[0] is reserved. Fields are off-by-one */
264 t->reason = h->cpu_tag[4] & 0xf;
265 t->queue = (h->cpu_tag[1] & 0xe0) >> 5;
266 t->port = h->cpu_tag[1] & 0x1f;
267 t->crc_error = t->reason == 13;
268
269 pr_debug("Reason: %d\n", t->reason);
270 if (t->reason != 6) /* NIC_RX_REASON_SPECIAL_TRAP */
271 t->l2_offloaded = 1;
272 else
273 t->l2_offloaded = 0;
274
275 return t->l2_offloaded;
276 }
277
278 bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
279 {
280 /* cpu_tag[0] is reserved. Fields are off-by-one */
281 t->reason = h->cpu_tag[5] & 0x1f;
282 t->queue = (h->cpu_tag[4] & 0xe000) >> 13;
283 t->port = h->cpu_tag[1] & 0x3f;
284 t->crc_error = h->cpu_tag[4] & BIT(6);
285
286 pr_debug("Reason: %d\n", t->reason);
287 if ((t->reason >= 7 && t->reason <= 13) || /* NIC_RX_REASON_RMA */
288 (t->reason >= 23 && t->reason <= 25)) /* NIC_RX_REASON_SPECIAL_TRAP */
289 t->l2_offloaded = 0;
290 else
291 t->l2_offloaded = 1;
292
293 return t->l2_offloaded;
294 }
295
296 bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
297 {
298 t->reason = h->cpu_tag[7] & 0x3f;
299 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
300 t->port = (h->cpu_tag[0] >> 8) & 0x1f;
301 t->crc_error = h->cpu_tag[1] & BIT(6);
302
303 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
304 if (t->reason >= 19 && t->reason <= 27)
305 t->l2_offloaded = 0;
306 else
307 t->l2_offloaded = 1;
308
309 return t->l2_offloaded;
310 }
311
312 bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
313 {
314 t->reason = h->cpu_tag[7] & 0x3f;
315 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
316 t->port = (h->cpu_tag[0] >> 8) & 0x3f;
317 t->crc_error = h->cpu_tag[1] & BIT(6);
318
319 if (t->reason != 63)
320 pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue);
321 if (t->reason >= 19 && t->reason <= 27) /* NIC_RX_REASON_RMA */
322 t->l2_offloaded = 0;
323 else
324 t->l2_offloaded = 1;
325
326 return t->l2_offloaded;
327 }
328
329 /* Discard the RX ring-buffers, called as part of the net-ISR
330 * when the buffer runs over
331 */
/* Discard the RX ring-buffers, called as part of the net-ISR
 * when the buffer runs over: walk every RX ring from the driver's
 * position up to the ASIC's current descriptor, re-initialize each
 * header and hand the entries back to the switch.
 */
static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
{
	for (int r = 0; r < priv->rxrings; r++) {
		struct ring_b *ring = priv->membase;
		struct p_hdr *h;
		u32 *last;

		pr_debug("In %s working on r: %d\n", __func__, r);
		/* ASIC's current write position in this ring (uncached address) */
		last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
		do {
			/* Bit 0 set means the entry is already owned by the switch */
			if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
				break;
			pr_debug("Got something: %d\n", ring->c_rx[r]);
			h = &ring->rx_header[r][ring->c_rx[r]];
			memset(h, 0, sizeof(struct p_hdr));
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
			                         r * priv->rxringlen * RING_BUFFER +
			                         ring->c_rx[r] * RING_BUFFER);
			h->size = RING_BUFFER;
			/* make sure the header is visible to the ASIC */
			mb();

			/* Return the entry to the switch (bit 0); the last ring entry
			 * additionally carries the WRAP marker. NOTE(review): the
			 * non-wrap arm ORs 0x1 a second time — harmless, but "0" would
			 * match rtl838x_setup_ring_buffer.
			 */
			ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
			                               WRAP :
			                               0x1);
			ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
		} while (&ring->rx_r[r][ring->c_rx[r]] != last);
	}
}
361
/* Deferred-work container for forwarding L2 notification events to
 * switchdev. macs is a zero-terminated list; bit 63 of an entry encodes
 * add (set) vs. delete (clear), the low 48 bits hold the MAC address.
 */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];
};
367
/* Work handler: replay the queued FDB add/delete events to the bridge via
 * switchdev notifiers and free the work item. The macs list is terminated
 * by a zero entry; bit 63 selects add vs. delete.
 */
void rtl838x_fdb_sync(struct work_struct *work)
{
	const struct fdb_update_work *uw = container_of(work, struct fdb_update_work, work);

	for (int i = 0; uw->macs[i]; i++) {
		struct switchdev_notifier_fdb_info info;
		u8 addr[ETH_ALEN];
		int action;

		action = (uw->macs[i] & (1ULL << 63)) ?
		         SWITCHDEV_FDB_ADD_TO_BRIDGE :
		         SWITCHDEV_FDB_DEL_TO_BRIDGE;
		/* Low 48 bits carry the MAC address */
		u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
		info.addr = &addr[0];
		info.vid = 0;
		info.offloaded = 1;
		pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
		call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
	}
	/* work is the first member of struct fdb_update_work, so this frees uw */
	kfree(work);
}
389
/* Drain the RTL839x L2 notification ring (located right after the packet
 * rings in priv->membase). For every ring entry owned by the CPU (bit 0
 * clear), collect the valid events into a zero-terminated MAC list,
 * schedule rtl838x_fdb_sync to report them, and hand the entry back.
 */
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
	struct notify_b *nb = priv->membase + sizeof(struct ring_b);
	u32 e = priv->lastEvent;

	while (!(nb->ring[e] & 1)) {
		struct fdb_update_work *w;
		struct n_event *event;
		u64 mac;
		int i;

		/* GFP_ATOMIC: we are called from the interrupt handler */
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w) {
			pr_err("Out of memory: %s", __func__);
			return;
		}
		INIT_WORK(&w->work, rtl838x_fdb_sync);

		for (i = 0; i < NOTIFY_EVENTS; i++) {
			event = &nb->blocks[e].events[i];
			if (!event->valid)
				continue;
			mac = event->mac;
			/* Bit 63 flags an "add" event for rtl838x_fdb_sync */
			if (event->type)
				mac |= 1ULL << 63;
			w->ndev = priv->netdev;
			/* NOTE(review): invalid events leave zero gaps in w->macs,
			 * and rtl838x_fdb_sync stops at the first zero — events after
			 * a gap would be dropped; confirm events are always packed.
			 */
			w->macs[i] = mac;
		}

		/* Hand the ring entry back to the switch */
		nb->ring[e] = nb->ring[e] | 1;
		e = (e + 1) % NOTIFY_BLOCKS;

		w->macs[i] = 0ULL;
		schedule_work(&w->work);
	}
	priv->lastEvent = e;
}
428
/* Interrupt handler for RTL838x/RTL839x. Status bit layout (per this
 * handler): bits 16-19 TX done, bits 8-15 RX done per ring, bits 0-7 RX
 * ring overrun, bits 20-22 (RTL839x only) L2 notification events.
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);

	pr_debug("IRQ: %08x\n", status);

	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring; NAPI polling
		 * re-enables it when the ring has been drained.
		 */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (int i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status & 0x000ff) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
		         status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* L2 notification events, RTL839x only: each of bits 20-22 is acked
	 * separately and drained by the same handler.
	 */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	return IRQ_HANDLED;
}
481
/* Interrupt handler for RTL930x/RTL931x, which split TX done, RX done and
 * RX runout status into separate registers instead of one shared one.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
	         __func__, status_tx, status_rx, status_rx_r);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings; NAPI polling
		 * re-enables them when the rings are drained.
		 */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (int i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
		         status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	return IRQ_HANDLED;
}
524
/* Per-family register addresses and accessor callbacks: RTL838x */
static const struct rtl838x_eth_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
549
/* Per-family register addresses and accessor callbacks: RTL839x */
static const struct rtl838x_eth_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
574
/* Per-family register addresses and accessor callbacks: RTL930x
 * (split TX/RX-done/runout interrupt registers, L2 notify registers)
 */
static const struct rtl838x_eth_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
605
/* Per-family register addresses and accessor callbacks: RTL931x */
static const struct rtl838x_eth_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
636
/* Reset the NIC and its DMA queues. Disables traffic on the CPU port,
 * masks and clears all interrupts (preserving RTL839x L2 notification
 * state across the reset), pulses the family-specific reset bits and
 * re-initializes the Head-of-Line / ring counters.
 */
static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
{
	u32 int_saved, nbuf;	/* only used on RTL8390, saved/restored below */
	u32 reset_mask;

	pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
	/* Stop TX/RX on the CPU port before resetting */
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
	mdelay(100);

	/* Disable and clear interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	if (priv->family_id == RTL8390_FAMILY_ID) {
		/* Preserve L2 notification and NBUF settings */
		int_saved = sw_r32(priv->r->dma_if_intr_msk);
		nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);

		/* Disable link change interrupt on RTL839x */
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	/* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		reset_mask = 0x6;
	else
		reset_mask = 0xc;

	sw_w32(reset_mask, priv->r->rst_glb_ctrl);

	do { /* Wait for reset of NIC and Queues done */
		udelay(20);
	} while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask);
	mdelay(100);

	/* Setup Head of Line */
	if (priv->family_id == RTL8380_FAMILY_ID)
		sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); /* Disabled on RTL8380 */
	if (priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		for (int i = 0; i < priv->rxrings; i++) {
			int pos = (i % 3) * 10;	/* 3 rings per register, 10 bits each */

			sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
			/* NOTE(review): the value is not shifted by 'pos' here, unlike
			 * the equivalent write in rtl93xx_hw_en_rxtx() — verify whether
			 * 'priv->rxringlen << pos' was intended for rings 1 and 2 of
			 * each register.
			 */
			sw_w32_mask(0x3ff << pos, priv->rxringlen,
			            priv->r->dma_if_rx_ring_cntr(i));
		}
	}

	/* Re-enable link change interrupt */
	if (priv->family_id == RTL8390_FAMILY_ID) {
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		/* Restore notification settings: on RTL838x these bits are null */
		sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
		sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	}
}
712
713 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
714 {
715 struct ring_b *ring = priv->membase;
716
717 for (int i = 0; i < priv->rxrings; i++)
718 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
719
720 for (int i = 0; i < TXRINGS; i++)
721 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
722 }
723
/* Enable packet RX/TX between CPU and switch on RTL838x: configure the
 * DMA interface, unmask interrupts, start DMA and bring up the CPU port.
 */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
750
/* Enable packet RX/TX between CPU and switch on RTL839x, including L2
 * notification interrupts and adding the CPU port to the lookup-miss
 * flooding portmask.
 */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); /* Notify IRQ! */

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask */
	/* TODO: The code below should also work for the RTL838x */
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
774
/* Enable packet RX/TX between CPU and switch on RTL930x/RTL931x:
 * program per-ring sizes and fill-level counters, unmask interrupts,
 * start DMA and bring up the CPU port.
 */
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
	sw_w32(0x06400040, priv->r->dma_if_ctrl);

	for (int i = 0; i < priv->rxrings; i++) {
		int pos = (i % 3) * 10;	/* 3 rings per register, 10 bits each */
		u32 v;

		sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));

		/* Some SoCs have issues with missing underflow protection:
		 * write the current fill level back, which subtracts it and
		 * zeroes the counter.
		 */
		v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
		sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
	}

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
	sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);

	/* Enable DMA */
	sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins the unknown-unicast flooding portmask */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
	else
		sw_w32_mask(0, BIT(priv->cpu_port), RTL931X_L2_UNKN_UC_FLD_PMSK);

	/* Force CPU port link configuration (family-specific force-mode bits) */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else
		sw_w32(0x2a1d, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
812
/* Initialize all RX and TX descriptor rings: point each header at its
 * slice of the buffer space, hand RX entries to the switch (bit 0 set),
 * keep TX entries owned by the CPU, and mark the last entry of every
 * ring with the WRAP flag.
 */
static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
{
	for (int i = 0; i < priv->rxrings; i++) {
		struct p_hdr *h;
		int j;

		for (j = 0; j < priv->rxringlen; j++) {
			h = &ring->rx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			/* Uncached (KSEG1) view of this entry's RX buffer */
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
			                         i * priv->rxringlen * RING_BUFFER +
			                         j * RING_BUFFER);
			h->size = RING_BUFFER;
			/* All rings owned by switch, last one wraps */
			ring->rx_r[i][j] = KSEG1ADDR(h) | 1 | (j == (priv->rxringlen - 1) ?
			                   WRAP :
			                   0);
		}
		ring->c_rx[i] = 0;
	}

	for (int i = 0; i < TXRINGS; i++) {
		struct p_hdr *h;
		int j;

		for (j = 0; j < TXRINGLEN; j++) {
			h = &ring->tx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			h->buf = (u8 *)KSEG1ADDR(ring->tx_space +
			                         i * TXRINGLEN * RING_BUFFER +
			                         j * RING_BUFFER);
			h->size = RING_BUFFER;
			/* TX entries stay CPU-owned (bit 0 clear) until a packet is queued */
			ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
		}
		/* Last header is wrapping around */
		ring->tx_r[i][j - 1] |= WRAP;
		ring->c_tx[i] = 0;
	}
}
852
853 static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
854 {
855 struct notify_b *b = priv->membase + sizeof(struct ring_b);
856
857 for (int i = 0; i < NOTIFY_BLOCKS; i++)
858 b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);
859
860 sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
861 sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);
862
863 /* Setup notification events */
864 sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); /* RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN */
865 sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); /* SUSPEND_NOTIFICATION_EN
866
867 /* Enable Notification */
868 sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
869 priv->lastEvent = 0;
870 }
871
/* ndo_open: reset the hardware, (re)build the ring buffers, start
 * phylink and NAPI, then enable RX/TX with the family-specific setup and
 * trap IGMP/MLD traffic to the CPU port. Returns 0 on success.
 */
static int rtl838x_eth_open(struct net_device *ndev)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);
	struct ring_b *ring = priv->membase;

	pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
	         __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);

	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_reset(priv);
	rtl838x_setup_ring_buffer(priv, ring);
	if (priv->family_id == RTL8390_FAMILY_ID) {
		rtl839x_setup_notify_ring_buffer(priv);
		/* Make sure the ring structure is visible to the ASIC */
		mb();
		flush_cache_all();
	}

	rtl838x_hw_ring_setup(priv);
	phylink_start(priv->phylink);

	for (int i = 0; i < priv->rxrings; i++)
		napi_enable(&priv->rx_qs[i].napi);

	/* Family-specific RX/TX enable and trap setup */
	switch (priv->family_id) {
	case RTL8380_FAMILY_ID:
		rtl838x_hw_en_rxtx(priv);
		/* Trap IGMP/MLD traffic to CPU-Port */
		sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
		break;

	case RTL8390_FAMILY_ID:
		rtl839x_hw_en_rxtx(priv);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
		break;

	case RTL9300_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
		break;

	case RTL9310_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);

		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL);

		/* Disable External CPU access to switch, clear EXT_CPU_EN */
		sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2);

		/* Set PCIE_PWR_DOWN */
		sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL);
		break;
	}

	netif_tx_start_all_queues(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
942
/* Quiesce the packet engine: stop CPU-port traffic, disable DMA,
 * flush the L2 address caches, force the CPU-port link down and mask
 * and acknowledge all TX/RX interrupts.
 */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* Per-family magic: force-mode value for the CPU-port MAC and the
	 * set of interrupt-status bits to acknowledge below
	 */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;

	/* Disable RX/TX from/to CPU-port */
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic; the RX/TX enable bits sit at different
	 * positions on RTL93xx than on RTL83xx
	 */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); /* Test, whether this is needed */

	/* Block all ports */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache, busy-waiting per port on the flush bit
	 * (bit 26 on RTL838x, bit 28 on RTL839x)
	 */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (int i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (int i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	/* TODO: L2 flush register is 64 bit on RTL931X and 930X */

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	else if (priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(BIT(0) | BIT(9), 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts and acknowledge any pending status */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
1005
1006 static int rtl838x_eth_stop(struct net_device *ndev)
1007 {
1008 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1009
1010 pr_info("in %s\n", __func__);
1011
1012 phylink_stop(priv->phylink);
1013 rtl838x_hw_stop(priv);
1014
1015 for (int i = 0; i < priv->rxrings; i++)
1016 napi_disable(&priv->rx_qs[i].napi);
1017
1018 netif_tx_stop_all_queues(ndev);
1019
1020 return 0;
1021 }
1022
/* Configure reserved-multicast (RMA) flooding on RTL838x according to
 * the interface flags; promiscuous mode additionally floods the
 * address classes controlled by RMA_CTRL_1.
 */
static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
{
	/* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
	 * CTRL_0_FULL = GENMASK(21, 0) = 0x3FFFFF
	 */
	if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
		sw_w32(0x0, RTL838X_RMA_CTRL_0);
		sw_w32(0x0, RTL838X_RMA_CTRL_1);
	}
	if (ndev->flags & IFF_ALLMULTI)
		sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
	if (ndev->flags & IFF_PROMISC) {
		sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
		sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
	}
}
1039
/* Configure reserved-multicast (RMA) flooding on RTL839x according to
 * the interface flags; promiscuous mode additionally floods the
 * address classes controlled by RMA_CTRL_3.
 */
static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
{
	/* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
	 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
	 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
	 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
	 */
	if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
		sw_w32(0x0, RTL839X_RMA_CTRL_0);
		sw_w32(0x0, RTL839X_RMA_CTRL_1);
		sw_w32(0x0, RTL839X_RMA_CTRL_2);
		sw_w32(0x0, RTL839X_RMA_CTRL_3);
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
		sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
		sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
	}
	if (ndev->flags & IFF_PROMISC) {
		sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
		sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
		sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
		sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
	}
}
1065
1066 static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
1067 {
1068 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1069 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1070 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
1071 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1072 */
1073 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1074 sw_w32(GENMASK(31, 2), RTL930X_RMA_CTRL_0);
1075 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_1);
1076 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_2);
1077 } else {
1078 sw_w32(0x0, RTL930X_RMA_CTRL_0);
1079 sw_w32(0x0, RTL930X_RMA_CTRL_1);
1080 sw_w32(0x0, RTL930X_RMA_CTRL_2);
1081 }
1082 }
1083
1084 static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
1085 {
1086 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1087 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1088 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00.
1089 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1090 */
1091 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1092 sw_w32(GENMASK(31, 2), RTL931X_RMA_CTRL_0);
1093 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_1);
1094 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_2);
1095 } else {
1096 sw_w32(0x0, RTL931X_RMA_CTRL_0);
1097 sw_w32(0x0, RTL931X_RMA_CTRL_1);
1098 sw_w32(0x0, RTL931X_RMA_CTRL_2);
1099 }
1100 }
1101
1102 static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1103 {
1104 unsigned long flags;
1105 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1106
1107 pr_warn("%s\n", __func__);
1108 spin_lock_irqsave(&priv->lock, flags);
1109 rtl838x_hw_stop(priv);
1110 rtl838x_hw_ring_setup(priv);
1111 rtl838x_hw_en_rxtx(priv);
1112 netif_trans_update(ndev);
1113 netif_start_queue(ndev);
1114 spin_unlock_irqrestore(&priv->lock, flags);
1115 }
1116
/* ndo_start_xmit callback: transmit one skb on the TX ring selected by
 * the skb's queue mapping.
 *
 * If the frame carries a trailing 4-byte DSA tag (0x80, port, 0x10,
 * 0x00) it is stripped and converted into the proprietary CPU-tag of
 * the TX header; the freed space is reused for the CRC.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY while the descriptor is
 * still owned by the switch.
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) /* Check for high prio queue */
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	if (netdev_uses_dsa(dev) &&
	    skb->data[len - 4] == 0x80 &&
	    skb->data[len - 3] < priv->cpu_port &&
	    skb->data[len - 2] == 0x10 &&
	    skb->data[len - 1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len - 3];
		skb->data[len - 4] = skb->data[len - 3] = skb->data[len - 2] = skb->data[len - 1] = 0x00;
		len -= 4;
	}

	len += 4; /* Add space for CRC */

	if (skb_padto(skb, len)) {
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {

		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		/* On RTL8380 SoCs, small packet lengths being sent need adjustments */
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		/* A trailing tag was found above: direct the frame to that port */
		if (dest_port >= 0)
			priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		/* Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs */
		if (priv->family_id == RTL8380_FAMILY_ID) {
			for (int i = 0; i < 10; i++) {
				u32 val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID || priv->family_id == RTL9300_FAMILY_ID) {
			/* Ring ID q == 0: Low priority, Ring ID = 1: High prio queue */
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}

txdone:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
1212
1213 /* Return queue number for TX. On the RTL83XX, these queues have equal priority
1214 * so we do round-robin
1215 */
1216 u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1217 struct net_device *sb_dev)
1218 {
1219 static u8 last = 0;
1220
1221 last++;
1222 return last % TXRINGS;
1223 }
1224
1225 /* Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1226 */
1227 u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1228 struct net_device *sb_dev)
1229 {
1230 if (skb->priority >= TC_PRIO_CONTROL)
1231 return 1;
1232
1233 return 0;
1234 }
1235
1236 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
1237 {
1238 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1239 struct ring_b *ring = priv->membase;
1240 LIST_HEAD(rx_list);
1241 unsigned long flags;
1242 int work_done = 0;
1243 u32 *last;
1244 bool dsa = netdev_uses_dsa(dev);
1245
1246 pr_debug("---------------------------------------------------------- RX - %d\n", r);
1247 spin_lock_irqsave(&priv->lock, flags);
1248 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1249
1250 do {
1251 struct sk_buff *skb;
1252 struct dsa_tag tag;
1253 struct p_hdr *h;
1254 u8 *skb_data;
1255 u8 *data;
1256 int len;
1257
1258 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
1259 if (&ring->rx_r[r][ring->c_rx[r]] != last) {
1260 netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
1261 r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
1262 }
1263 break;
1264 }
1265
1266 h = &ring->rx_header[r][ring->c_rx[r]];
1267 data = (u8 *)KSEG1ADDR(h->buf);
1268 len = h->len;
1269 if (!len)
1270 break;
1271 work_done++;
1272
1273 len -= 4; /* strip the CRC */
1274 /* Add 4 bytes for cpu_tag */
1275 if (dsa)
1276 len += 4;
1277
1278 skb = netdev_alloc_skb(dev, len + 4);
1279 skb_reserve(skb, NET_IP_ALIGN);
1280
1281 if (likely(skb)) {
1282 /* BUG: Prevent bug on RTL838x SoCs */
1283 if (priv->family_id == RTL8380_FAMILY_ID) {
1284 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
1285 for (int i = 0; i < priv->rxrings; i++) {
1286 unsigned int val;
1287
1288 /* Update each ring cnt */
1289 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
1290 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
1291 }
1292 }
1293
1294 skb_data = skb_put(skb, len);
1295 /* Make sure data is visible */
1296 mb();
1297 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
1298 /* Overwrite CRC with cpu_tag */
1299 if (dsa) {
1300 priv->r->decode_tag(h, &tag);
1301 skb->data[len - 4] = 0x80;
1302 skb->data[len - 3] = tag.port;
1303 skb->data[len - 2] = 0x10;
1304 skb->data[len - 1] = 0x00;
1305 if (tag.l2_offloaded)
1306 skb->data[len - 3] |= 0x40;
1307 }
1308
1309 if (tag.queue >= 0)
1310 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1311 tag.queue, len, tag.reason, tag.port);
1312
1313 skb->protocol = eth_type_trans(skb, dev);
1314 if (dev->features & NETIF_F_RXCSUM) {
1315 if (tag.crc_error)
1316 skb_checksum_none_assert(skb);
1317 else
1318 skb->ip_summed = CHECKSUM_UNNECESSARY;
1319 }
1320 dev->stats.rx_packets++;
1321 dev->stats.rx_bytes += len;
1322
1323 list_add_tail(&skb->list, &rx_list);
1324 } else {
1325 if (net_ratelimit())
1326 dev_warn(&dev->dev, "low on memory - packet dropped\n");
1327 dev->stats.rx_dropped++;
1328 }
1329
1330 /* Reset header structure */
1331 memset(h, 0, sizeof(struct p_hdr));
1332 h->buf = data;
1333 h->size = RING_BUFFER;
1334
1335 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
1336 WRAP :
1337 0x1);
1338 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
1339 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1340 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
1341
1342 netif_receive_skb_list(&rx_list);
1343
1344 /* Update counters */
1345 priv->r->update_cntr(r, 0);
1346
1347 spin_unlock_irqrestore(&priv->lock, flags);
1348
1349 return work_done;
1350 }
1351
/* NAPI poll handler for one RX ring: drain packets until the budget is
 * exhausted or the ring runs empty, then complete NAPI and re-enable
 * the RX-done interrupt for this ring.
 */
static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
{
	struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
	struct rtl838x_eth_priv *priv = rx_q->priv;
	int work_done = 0;
	int r = rx_q->id;
	int work;

	while (work_done < budget) {
		work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
		if (!work)
			break;
		work_done += work;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Enable RX interrupt: RTL93xx have a dedicated RX-done mask
		 * register, RTL83xx use per-ring bits in the common mask
		 */
		if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
			sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
		else
			sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
	}

	return work_done;
}
1379
1380
/* phylink validate callback: restrict the supported/advertised link
 * modes of the CPU port to what the MAC supports for the requested
 * interface mode. Unsupported interfaces clear the whole mask.
 */
static void rtl838x_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	pr_debug("In %s\n", __func__);

	if (!phy_interface_mode_is_rgmii(state->interface) &&
	    state->interface != PHY_INTERFACE_MODE_1000BASEX &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
	    state->interface != PHY_INTERFACE_MODE_SGMII) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		pr_err("Unsupported interface: %d\n", state->interface);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	/* 10/100 MBit/s, full and half duplex, are always supported */
	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
1427
1428
/* phylink mac_config callback: intentionally a no-op. */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* This is only being called for the master device,
	 * i.e. the CPU-Port. We don't need to do anything.
	 */

	pr_info("In %s, mode %x\n", __func__, mode);
}
1439
/* phylink mac_an_restart callback: restart auto-negotiation on the
 * CPU port by bouncing the forced link state. RTL838x only.
 */
static void rtl838x_mac_an_restart(struct phylink_config *config)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	/* This works only on RTL838x chips */
	if (priv->family_id != RTL8380_FAMILY_ID)
		return;

	pr_debug("In %s\n", __func__);
	/* Restart by disabling and re-enabling link.
	 * NOTE(review): 0x6192D/0x6192F are force-mode magic values that
	 * differ only in the low bits; exact bit semantics undocumented here.
	 */
	sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(20);
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
1455
/* phylink mac_pcs_get_state callback: read link, duplex, speed and
 * pause state of the CPU port from the MAC status registers.
 */
static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
				      struct phylink_link_state *state)
{
	u32 speed;
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	int port = priv->cpu_port;

	pr_info("In %s\n", __func__);

	state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
	state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;

	pr_info("%s link status is %d\n", __func__, state->link);
	/* Translate the hardware speed code into an ethtool speed */
	speed = priv->r->get_mac_link_spd_sts(port);
	switch (speed) {
	case 0:
		state->speed = SPEED_10;
		break;
	case 1:
		state->speed = SPEED_100;
		break;
	case 2:
		state->speed = SPEED_1000;
		break;
	case 5:
		state->speed = SPEED_2500;
		break;
	case 6:
		state->speed = SPEED_5000;
		break;
	case 4:
		state->speed = SPEED_10000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	/* Report the pause directions currently active on the MAC */
	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
	if (priv->r->get_mac_rx_pause_sts(port))
		state->pause |= MLO_PAUSE_RX;
	if (priv->r->get_mac_tx_pause_sts(port))
		state->pause |= MLO_PAUSE_TX;
}
1501
1502 static void rtl838x_mac_link_down(struct phylink_config *config,
1503 unsigned int mode,
1504 phy_interface_t interface)
1505 {
1506 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1507 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1508
1509 pr_debug("In %s\n", __func__);
1510 /* Stop TX/RX to port */
1511 sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
1512 }
1513
1514 static void rtl838x_mac_link_up(struct phylink_config *config,
1515 struct phy_device *phy, unsigned int mode,
1516 phy_interface_t interface, int speed, int duplex,
1517 bool tx_pause, bool rx_pause)
1518 {
1519 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1520 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1521
1522 pr_debug("In %s\n", __func__);
1523 /* Restart TX/RX to port */
1524 sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
1525 }
1526
/* Program the 6-byte MAC address @mac into the hardware registers: the
 * common MAC register pair and, on RTL8380, the additional ALE and MAC2
 * register blocks. Runs under the device spinlock.
 */
static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
{
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	pr_debug("In %s\n", __func__);
	/* Upper 2 bytes in the first register, lower 4 in the second */
	sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
	sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);

	if (priv->family_id == RTL8380_FAMILY_ID) {
		/* 2 more registers, ALE/MAC block */
		sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
		sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		       (RTL838X_MAC_ALE + 4));

		sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
		sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		       RTL838X_MAC2 + 4);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
1549
1550 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
1551 {
1552 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1553 const struct sockaddr *addr = p;
1554 u8 *mac = (u8 *) (addr->sa_data);
1555
1556 if (!is_valid_ether_addr(addr->sa_data))
1557 return -EADDRNOTAVAIL;
1558
1559 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1560 rtl838x_set_mac_hw(dev, mac);
1561
1562 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1563
1564 return 0;
1565 }
1566
/* MAC initialization for RTL8390 SoCs; currently a stub returning 0.
 * TODO: EEE and the egress-rate limitation still need to be set up.
 */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	return 0;
}
1572
1573 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1574 {
1575 if (priv->family_id == 0x8390)
1576 return rtl8390_init_mac(priv);
1577
1578 /* At present we do not know how to set up EEE on any other SoC than RTL8380 */
1579 if (priv->family_id != 0x8380)
1580 return 0;
1581
1582 pr_info("%s\n", __func__);
1583 /* fix timer for EEE */
1584 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1585 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1586
1587 /* Init VLAN. TODO: Understand what is being done, here */
1588 if (priv->id == 0x8382) {
1589 for (int i = 0; i <= 28; i++)
1590 sw_w32(0, 0xd57c + i * 0x80);
1591 }
1592 if (priv->id == 0x8380) {
1593 for (int i = 8; i <= 28; i++)
1594 sw_w32(0, 0xd57c + i * 0x80);
1595 }
1596
1597 return 0;
1598 }
1599
/* ethtool get_link_ksettings: delegate to phylink */
static int rtl838x_get_link_ksettings(struct net_device *ndev,
				      struct ethtool_link_ksettings *cmd)
{
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_debug("%s called\n", __func__);

	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
1609
/* ethtool set_link_ksettings: delegate to phylink */
static int rtl838x_set_link_ksettings(struct net_device *ndev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_debug("%s called\n", __func__);

	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
1619
1620 static int rtl838x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1621 {
1622 u32 val;
1623 int err;
1624 struct rtl838x_eth_priv *priv = bus->priv;
1625
1626 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1627 return rtl838x_read_sds_phy(mii_id, regnum);
1628
1629 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1630 err = rtl838x_read_mmd_phy(mii_id,
1631 mdiobus_c45_devad(regnum),
1632 regnum, &val);
1633 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1634 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1635 val, err);
1636 } else {
1637 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1638 err = rtl838x_read_phy(mii_id, page, regnum, &val);
1639 }
1640 if (err)
1641 return err;
1642
1643 return val;
1644 }
1645
/* Page-0 convenience wrapper around rtl838x_mdio_read_paged() */
static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl838x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1650
1651 static int rtl839x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1652 {
1653 u32 val;
1654 int err;
1655 struct rtl838x_eth_priv *priv = bus->priv;
1656
1657 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1658 return rtl839x_read_sds_phy(mii_id, regnum);
1659
1660 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1661 err = rtl839x_read_mmd_phy(mii_id,
1662 mdiobus_c45_devad(regnum),
1663 regnum, &val);
1664 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1665 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1666 val, err);
1667 } else {
1668 err = rtl839x_read_phy(mii_id, page, regnum, &val);
1669 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1670 }
1671
1672 if (err)
1673 return err;
1674
1675 return val;
1676 }
1677
/* Page-0 convenience wrapper around rtl839x_mdio_read_paged() */
static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl839x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1682
/* Read a PHY register via the RTL9300 SMI bus. Internal PHYs are read
 * through their SerDes; Clause-45 (and C22-mapped MMD) accesses use
 * the MMD helpers, everything else is a paged Clause-22 read.
 * Returns the (non-negative) register value or a negative error code.
 */
static int rtl930x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
{
	u32 val;
	int err;
	struct rtl838x_eth_priv *priv = bus->priv;

	if (priv->phy_is_internal[mii_id])
		return rtl930x_read_sds_phy(priv->sds_id[mii_id], page, regnum);

	if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
		err = rtl930x_read_mmd_phy(mii_id,
					   mdiobus_c45_devad(regnum),
					   regnum, &val);
		pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
			 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
			 val, err);
	} else {
		err = rtl930x_read_phy(mii_id, page, regnum, &val);
		pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
	}

	if (err)
		return err;

	return val;
}
1709
/* Page-0 convenience wrapper around rtl930x_mdio_read_paged() */
static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl930x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1714
/* Read a PHY register via the RTL9310 SMI bus. Internal PHYs are read
 * through their SerDes (whose helper returns value-or-error in one
 * int); Clause-45 (and C22-mapped MMD) accesses use the MMD helpers,
 * everything else is a paged Clause-22 read.
 * Returns the (non-negative) register value or a negative error code.
 */
static int rtl931x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
{
	u32 val;
	int err, v;
	struct rtl838x_eth_priv *priv = bus->priv;

	pr_debug("%s: In here, port %d\n", __func__, mii_id);
	if (priv->phy_is_internal[mii_id]) {
		/* Split the combined value-or-error result into err/val */
		v = rtl931x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
		if (v < 0) {
			err = v;
		} else {
			err = 0;
			val = v;
		}
	} else {
		if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
			err = rtl931x_read_mmd_phy(mii_id,
						   mdiobus_c45_devad(regnum),
						   regnum, &val);
			pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
				 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
				 val, err);
		} else {
			err = rtl931x_read_phy(mii_id, page, regnum, &val);
			pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
		}
	}

	if (err)
		return err;

	return val;
}
1749
/* Page-0 convenience wrapper around rtl931x_mdio_read_paged() */
static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl931x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1754
/* Write a PHY register via the RTL8380 SMI bus. Ports 24-27 of the
 * RTL8380 are serviced through the internal SerDes register block,
 * with port 26 using the second block at offset 0x100. Clause-45 (and
 * C22-mapped MMD) accesses go through the MMD helper.
 * Returns 0 on success or a negative error code.
 */
static int rtl838x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
				    int regnum, u16 value)
{
	u32 offset = 0;
	struct rtl838x_eth_priv *priv = bus->priv;
	int err;

	if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
		if (mii_id == 26)
			offset = 0x100;
		sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
		return 0;
	}

	if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
		err = rtl838x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
					    regnum, value);
		pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
			 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
			 value, err);

		return err;
	}
	err = rtl838x_write_phy(mii_id, page, regnum, value);
	pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);

	return err;
}
1783
/* Page-0 convenience wrapper around rtl838x_mdio_write_paged() */
static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl838x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1789
1790 static int rtl839x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1791 int regnum, u16 value)
1792 {
1793 struct rtl838x_eth_priv *priv = bus->priv;
1794 int err;
1795
1796 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1797 return rtl839x_write_sds_phy(mii_id, regnum, value);
1798
1799 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1800 err = rtl839x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1801 regnum, value);
1802 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
1803 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1804 value, err);
1805
1806 return err;
1807 }
1808
1809 err = rtl839x_write_phy(mii_id, page, regnum, value);
1810 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1811
1812 return err;
1813 }
1814
/* Page-0 convenience wrapper around rtl839x_mdio_write_paged() */
static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl839x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1820
/* Write a PHY register via the RTL9300 SMI bus. Internal PHYs are
 * written through their SerDes; Clause-45 (and C22-mapped MMD)
 * accesses use the MMD helper, everything else is a paged Clause-22
 * write. Returns 0 on success or a negative error code.
 */
static int rtl930x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
				    int regnum, u16 value)
{
	struct rtl838x_eth_priv *priv = bus->priv;
	int err;

	if (priv->phy_is_internal[mii_id])
		return rtl930x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);

	if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD))
		return rtl930x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
					     regnum, value);

	err = rtl930x_write_phy(mii_id, page, regnum, value);
	pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);

	return err;
}
1839
/* Page-0 convenience wrapper around rtl930x_mdio_write_paged() */
static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl930x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1845
/* Write a PHY register via the RTL9310 SMI bus. Internal PHYs are
 * written through their SerDes; Clause-45 (and C22-mapped MMD)
 * accesses use the MMD helper, everything else is a paged Clause-22
 * write. Returns 0 on success or a negative error code.
 */
static int rtl931x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
				    int regnum, u16 value)
{
	struct rtl838x_eth_priv *priv = bus->priv;
	int err;

	if (priv->phy_is_internal[mii_id])
		return rtl931x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);

	if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
		err = rtl931x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
					    regnum, value);
		pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
			 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
			 value, err);

		return err;
	}

	err = rtl931x_write_phy(mii_id, page, regnum, value);
	pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);

	return err;
}
1870
/* Page-0 convenience wrapper around rtl931x_mdio_write_paged() */
static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl931x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1876
/* MDIO bus reset for RTL838x: take PHY management away from the MAC's
 * automatic polling so the SoC can drive the SMI bus directly.
 * Always returns 0.
 */
static int rtl838x_mdio_reset(struct mii_bus *bus)
{
	pr_debug("%s called\n", __func__);
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);

	/* Enable PHY control via SoC */
	sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);

	/* Probably should reset all PHYs here... */
	return 0;
}
1889
/* MDIO bus reset for RTL839x. Intentionally a no-op: the early return
 * skips the polling-disable sequence below, which is known to be
 * broken (see BUG comment). The dead code is kept for reference.
 */
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
	return 0;

	pr_debug("%s called\n", __func__);
	/* BUG: The following does not work, but should! */
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	/* Disable PHY polling via SoC */
	sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);

	/* Probably should reset all PHYs here... */
	return 0;
}
1905
/* Per-port bit offset of the MAC-type field written into
 * RTL930X_SMI_MAC_TYPE_CTRL (see rtl930x_mdio_reset())
 */
u8 mac_type_bit[RTL930X_CPU_PORT] = {0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6,
				     8, 8, 8, 8, 10, 10, 10, 10, 12, 15, 18, 21};
1908
/* Configure the SMI (MDIO) master of the RTL930x.
 *
 * Maps every front-panel port to its SMI bus/address pair, selects which
 * busses are polled with Clause 45 frames, programs the per-port MAC type
 * from the PHY interface mode, and finally sets the special polling
 * register values needed for the 10G PHYs.
 */
static int rtl930x_mdio_reset(struct mii_bus *bus)
{
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[2];
	u32 poll_ctrl = 0;
	u32 private_poll_mask = 0;
	u32 v;
	bool uses_usxgmii = false; /* For the Aquantia PHYs */
	bool uses_hisgmii = false; /* For the RTL8221/8226 */

	/* Mapping of port to phy-addresses on an SMI bus */
	poll_sel[0] = poll_sel[1] = 0;
	for (int i = 0; i < RTL930X_CPU_PORT; i++) {
		int pos;

		/* Ports with no valid SMI bus (only busses 0-3 exist) are skipped */
		if (priv->smi_bus[i] > 3)
			continue;
		/* 5-bit PHY address per port, six ports per 32-bit register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
			    RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);

		/* 2-bit bus selector per port, 16 ports per register */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
	}

	/* Configure which SMI bus is behind which port number */
	sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
	sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);

	/* Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+) */
	sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL);

	/* Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus */
	for (int i = 0; i < 4; i++)
		if (priv->smi_bus_isc45[i])
			c45_mask |= BIT(i + 16);

	pr_info("c45_mask: %08x\n", c45_mask);
	sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);

	/* Set the MAC type of each port according to the PHY-interface */
	/* Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0 */
	v = 0;
	for (int i = 0; i < RTL930X_CPU_PORT; i++) {
		switch (priv->interfaces[i]) {
		case PHY_INTERFACE_MODE_10GBASER:
			break; /* Serdes: Value = 0 */
		case PHY_INTERFACE_MODE_HSGMII:
			private_poll_mask |= BIT(i);
			/* fallthrough */
		case PHY_INTERFACE_MODE_USXGMII:
			v |= BIT(mac_type_bit[i]);
			uses_usxgmii = true;
			break;
		case PHY_INTERFACE_MODE_QSGMII:
			private_poll_mask |= BIT(i);
			v |= 3 << mac_type_bit[i];
			break;
		default:
			break;
		}
	}
	sw_w32(v, RTL930X_SMI_MAC_TYPE_CTRL);

	/* Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones) */
	sw_w32(private_poll_mask, RTL930X_SMI_PRVTE_POLLING_CTRL);

	/* The following magic values are found in the port configuration, they seem to
	 * define different ways of polling a PHY. The below is for the Aquantia PHYs of
	 * the XGS1250 and the RTL8226 of the XGS1210
	 */
	if (uses_usxgmii) {
		sw_w32(0x01010000, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
		sw_w32(0x01E7C400, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
		sw_w32(0x01E7E820, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
	}
	/* NOTE(review): uses_hisgmii is never set to true anywhere in this
	 * function, so the block below is currently dead code — confirm
	 * whether one of the interface-mode cases above was meant to set it.
	 */
	if (uses_hisgmii) {
		sw_w32(0x011FA400, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
		sw_w32(0x013FA412, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
		sw_w32(0x017FA414, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
	}

	pr_debug("%s: RTL930X_SMI_GLB_CTRL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_GLB_CTRL));
	pr_debug("%s: RTL930X_SMI_PORT0_15_POLLING_SEL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL));
	pr_debug("%s: RTL930X_SMI_PORT16_27_POLLING_SEL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));
	pr_debug("%s: RTL930X_SMI_MAC_TYPE_CTRL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_MAC_TYPE_CTRL));
	pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG0_CFG %08x\n", __func__,
		 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG0_CFG));
	pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG9_CFG %08x\n", __func__,
		 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG9_CFG));
	pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG10_CFG %08x\n", __func__,
		 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG10_CFG));
	pr_debug("%s: RTL930X_SMI_PRVTE_POLLING_CTRL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_PRVTE_POLLING_CTRL));

	return 0;
}
2012
/* Configure the SMI (MDIO) master of the RTL931x.
 *
 * Stops port polling, maps each of the 56 ports to its SMI bus/address
 * pair, enables MDC output for every bus that has at least one port, and
 * selects Clause-45 polling for the busses that carry c45 PHYs.
 */
static int rtl931x_mdio_reset(struct mii_bus *bus)
{
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[4];
	u32 poll_ctrl = 0;
	bool mdc_on[4];

	pr_info("%s called\n", __func__);
	/* Disable port polling for configuration purposes */
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
	msleep(100);

	mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
	/* Mapping of port to phy-addresses on an SMI bus */
	poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
	for (int i = 0; i < 56; i++) {
		u32 pos;

		/* 5-bit PHY address per port, six ports per 32-bit register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4);
		/* 2-bit bus selector per port, 16 ports per register */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
		mdc_on[priv->smi_bus[i]] = true;
	}

	/* Configure which SMI bus is behind which port number */
	for (int i = 0; i < 4; i++) {
		pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
		sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
	}

	/* Configure which SMI busses */
	pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	/* NOTE(review): c45_mask is still 0 here — it is only computed in the
	 * loop below, so this first print always shows the "before" value.
	 */
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
	for (int i = 0; i < 4; i++) {
		/* bus is polled in c45 */
		if (priv->smi_bus_isc45[i])
			c45_mask |= 0x2 << (i * 2); /* Std. C45, non-standard is 0x3 */
		/* Enable bus access via MDC */
		if (mdc_on[i])
			sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2);
	}

	pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));

	/* We have a 10G PHY enable polling
	 * sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2);
	 * sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3);
	 * sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4);
	 */
	sw_w32_mask(0xff, c45_mask, RTL931X_SMI_GLB_CTRL1);

	return 0;
}
2071
/* One-time memory/table initialization of the RTL931x switch core.
 *
 * Kicks off the self-init of the ENCAP, MIB, ACL and ALE memories and
 * busy-waits for each to complete.
 *
 * NOTE(review): all four wait loops are unbounded — if the hardware never
 * clears the init bit, boot hangs here. Confirm whether a timeout is
 * acceptable for this SoC.
 */
static int rtl931x_chip_init(struct rtl838x_eth_priv *priv)
{
	pr_info("In %s\n", __func__);

	/* Initialize Encapsulation memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ENCAP_INIT);
	do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1);
	pr_info("%s: init ENCAP done\n", __func__);

	/* Initialize Management Information Base memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_MIB_INIT);
	do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1);
	pr_info("%s: init MIB done\n", __func__);

	/* Initialize ACL (PIE) memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ACL_INIT);
	do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1);
	pr_info("%s: init ACL done\n", __func__);

	/* Initialize ALE memory and wait until finished */
	sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0));
	sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1);
	sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff);
	pr_info("%s: init ALE done\n", __func__);

	/* Enable ESD auto recovery */
	sw_w32(0x1, RTL931X_MDX_CTRL_RSVD);

	/* Init SPI, is this for thermal control or what? */
	sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0);

	return 0;
}
2107
2108 static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
2109 {
2110 struct device_node *mii_np, *dn;
2111 u32 pn;
2112 int ret;
2113
2114 pr_debug("%s called\n", __func__);
2115 mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
2116
2117 if (!mii_np) {
2118 dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
2119 return -ENODEV;
2120 }
2121
2122 if (!of_device_is_available(mii_np)) {
2123 ret = -ENODEV;
2124 goto err_put_node;
2125 }
2126
2127 priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
2128 if (!priv->mii_bus) {
2129 ret = -ENOMEM;
2130 goto err_put_node;
2131 }
2132
2133 switch(priv->family_id) {
2134 case RTL8380_FAMILY_ID:
2135 priv->mii_bus->name = "rtl838x-eth-mdio";
2136 priv->mii_bus->read = rtl838x_mdio_read;
2137 priv->mii_bus->read_paged = rtl838x_mdio_read_paged;
2138 priv->mii_bus->write = rtl838x_mdio_write;
2139 priv->mii_bus->write_paged = rtl838x_mdio_write_paged;
2140 priv->mii_bus->reset = rtl838x_mdio_reset;
2141 break;
2142 case RTL8390_FAMILY_ID:
2143 priv->mii_bus->name = "rtl839x-eth-mdio";
2144 priv->mii_bus->read = rtl839x_mdio_read;
2145 priv->mii_bus->read_paged = rtl839x_mdio_read_paged;
2146 priv->mii_bus->write = rtl839x_mdio_write;
2147 priv->mii_bus->write_paged = rtl839x_mdio_write_paged;
2148 priv->mii_bus->reset = rtl839x_mdio_reset;
2149 break;
2150 case RTL9300_FAMILY_ID:
2151 priv->mii_bus->name = "rtl930x-eth-mdio";
2152 priv->mii_bus->read = rtl930x_mdio_read;
2153 priv->mii_bus->read_paged = rtl930x_mdio_read_paged;
2154 priv->mii_bus->write = rtl930x_mdio_write;
2155 priv->mii_bus->write_paged = rtl930x_mdio_write_paged;
2156 priv->mii_bus->reset = rtl930x_mdio_reset;
2157 priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
2158 break;
2159 case RTL9310_FAMILY_ID:
2160 priv->mii_bus->name = "rtl931x-eth-mdio";
2161 priv->mii_bus->read = rtl931x_mdio_read;
2162 priv->mii_bus->read_paged = rtl931x_mdio_read_paged;
2163 priv->mii_bus->write = rtl931x_mdio_write;
2164 priv->mii_bus->write_paged = rtl931x_mdio_write_paged;
2165 priv->mii_bus->reset = rtl931x_mdio_reset;
2166 priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
2167 break;
2168 }
2169 priv->mii_bus->access_capabilities = MDIOBUS_ACCESS_C22_MMD;
2170 priv->mii_bus->priv = priv;
2171 priv->mii_bus->parent = &priv->pdev->dev;
2172
2173 for_each_node_by_name(dn, "ethernet-phy") {
2174 u32 smi_addr[2];
2175
2176 if (of_property_read_u32(dn, "reg", &pn))
2177 continue;
2178
2179 if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
2180 smi_addr[0] = 0;
2181 smi_addr[1] = pn;
2182 }
2183
2184 if (of_property_read_u32(dn, "sds", &priv->sds_id[pn]))
2185 priv->sds_id[pn] = -1;
2186 else {
2187 pr_info("set sds port %d to %d\n", pn, priv->sds_id[pn]);
2188 }
2189
2190 if (pn < MAX_PORTS) {
2191 priv->smi_bus[pn] = smi_addr[0];
2192 priv->smi_addr[pn] = smi_addr[1];
2193 } else {
2194 pr_err("%s: illegal port number %d\n", __func__, pn);
2195 }
2196
2197 if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
2198 priv->smi_bus_isc45[smi_addr[0]] = true;
2199
2200 if (of_property_read_bool(dn, "phy-is-integrated")) {
2201 priv->phy_is_internal[pn] = true;
2202 }
2203 }
2204
2205 dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
2206 if (!dn) {
2207 dev_err(&priv->pdev->dev, "No RTL switch node in DTS\n");
2208 return -ENODEV;
2209 }
2210
2211 for_each_node_by_name(dn, "port") {
2212 if (of_property_read_u32(dn, "reg", &pn))
2213 continue;
2214 pr_debug("%s Looking at port %d\n", __func__, pn);
2215 if (pn > priv->cpu_port)
2216 continue;
2217 if (of_get_phy_mode(dn, &priv->interfaces[pn]))
2218 priv->interfaces[pn] = PHY_INTERFACE_MODE_NA;
2219 pr_debug("%s phy mode of port %d is %s\n", __func__, pn, phy_modes(priv->interfaces[pn]));
2220 }
2221
2222 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
2223 ret = of_mdiobus_register(priv->mii_bus, mii_np);
2224
2225 err_put_node:
2226 of_node_put(mii_np);
2227
2228 return ret;
2229 }
2230
2231 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
2232 {
2233 pr_debug("%s called\n", __func__);
2234 if (!priv->mii_bus)
2235 return 0;
2236
2237 mdiobus_unregister(priv->mii_bus);
2238 mdiobus_free(priv->mii_bus);
2239
2240 return 0;
2241 }
2242
2243 static netdev_features_t rtl838x_fix_features(struct net_device *dev,
2244 netdev_features_t features)
2245 {
2246 return features;
2247 }
2248
2249 static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
2250 {
2251 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2252
2253 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2254 if (!(features & NETIF_F_RXCSUM))
2255 sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2256 else
2257 sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
2258 }
2259
2260 return 0;
2261 }
2262
2263 static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
2264 {
2265 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2266
2267 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2268 if (!(features & NETIF_F_RXCSUM))
2269 sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2270 else
2271 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
2272 }
2273
2274 return 0;
2275 }
2276
/* netdev callbacks for the RTL838x family */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2290
/* netdev callbacks for the RTL839x family; differs from RTL838x only in
 * the multicast-list handler.
 */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2304
/* netdev callbacks for the RTL930x family: 93xx queue selection and
 * feature handling, 930x multicast handler.
 */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2318
/* netdev callbacks for the RTL931x family.
 * NOTE(review): unlike the other three tables there is no .ndo_setup_tc
 * here — confirm TC offload is intentionally unsupported on RTL931x.
 */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
2331
/* phylink MAC operations shared by all supported SoC families */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_pcs_get_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
2340
/* ethtool operations: only link-settings get/set are implemented */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
2345
2346 static int __init rtl838x_eth_probe(struct platform_device *pdev)
2347 {
2348 struct net_device *dev;
2349 struct device_node *dn = pdev->dev.of_node;
2350 struct rtl838x_eth_priv *priv;
2351 struct resource *res, *mem;
2352 phy_interface_t phy_mode;
2353 struct phylink *phylink;
2354 int err = 0, rxrings, rxringlen;
2355 struct ring_b *ring;
2356
2357 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
2358 (u32)pdev, (u32)(&(pdev->dev)));
2359
2360 if (!dn) {
2361 dev_err(&pdev->dev, "No DT found\n");
2362 return -EINVAL;
2363 }
2364
2365 rxrings = (soc_info.family == RTL8380_FAMILY_ID
2366 || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
2367 rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
2368 rxringlen = MAX_ENTRIES / rxrings;
2369 rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
2370
2371 dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
2372 if (!dev) {
2373 err = -ENOMEM;
2374 goto err_free;
2375 }
2376 SET_NETDEV_DEV(dev, &pdev->dev);
2377 priv = netdev_priv(dev);
2378
2379 /* obtain buffer memory space */
2380 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2381 if (res) {
2382 mem = devm_request_mem_region(&pdev->dev, res->start,
2383 resource_size(res), res->name);
2384 if (!mem) {
2385 dev_err(&pdev->dev, "cannot request memory space\n");
2386 err = -ENXIO;
2387 goto err_free;
2388 }
2389
2390 dev->mem_start = mem->start;
2391 dev->mem_end = mem->end;
2392 } else {
2393 dev_err(&pdev->dev, "cannot request IO resource\n");
2394 err = -ENXIO;
2395 goto err_free;
2396 }
2397
2398 /* Allocate buffer memory */
2399 priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER +
2400 sizeof(struct ring_b) + sizeof(struct notify_b),
2401 (void *)&dev->mem_start, GFP_KERNEL);
2402 if (!priv->membase) {
2403 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
2404 err = -ENOMEM;
2405 goto err_free;
2406 }
2407
2408 /* Allocate ring-buffer space at the end of the allocated memory */
2409 ring = priv->membase;
2410 ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
2411
2412 spin_lock_init(&priv->lock);
2413
2414 dev->ethtool_ops = &rtl838x_ethtool_ops;
2415 dev->min_mtu = ETH_ZLEN;
2416 dev->max_mtu = 1536;
2417 dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
2418 dev->hw_features = NETIF_F_RXCSUM;
2419
2420 priv->id = soc_info.id;
2421 priv->family_id = soc_info.family;
2422 if (priv->id) {
2423 pr_info("Found SoC ID: %4x: %s, family %x\n",
2424 priv->id, soc_info.name, priv->family_id);
2425 } else {
2426 pr_err("Unknown chip id (%04x)\n", priv->id);
2427 return -ENODEV;
2428 }
2429
2430 switch (priv->family_id) {
2431 case RTL8380_FAMILY_ID:
2432 priv->cpu_port = RTL838X_CPU_PORT;
2433 priv->r = &rtl838x_reg;
2434 dev->netdev_ops = &rtl838x_eth_netdev_ops;
2435 break;
2436 case RTL8390_FAMILY_ID:
2437 priv->cpu_port = RTL839X_CPU_PORT;
2438 priv->r = &rtl839x_reg;
2439 dev->netdev_ops = &rtl839x_eth_netdev_ops;
2440 break;
2441 case RTL9300_FAMILY_ID:
2442 priv->cpu_port = RTL930X_CPU_PORT;
2443 priv->r = &rtl930x_reg;
2444 dev->netdev_ops = &rtl930x_eth_netdev_ops;
2445 break;
2446 case RTL9310_FAMILY_ID:
2447 priv->cpu_port = RTL931X_CPU_PORT;
2448 priv->r = &rtl931x_reg;
2449 dev->netdev_ops = &rtl931x_eth_netdev_ops;
2450 rtl931x_chip_init(priv);
2451 break;
2452 default:
2453 pr_err("Unknown SoC family\n");
2454 return -ENODEV;
2455 }
2456 priv->rxringlen = rxringlen;
2457 priv->rxrings = rxrings;
2458
2459 /* Obtain device IRQ number */
2460 dev->irq = platform_get_irq(pdev, 0);
2461 if (dev->irq < 0) {
2462 dev_err(&pdev->dev, "cannot obtain network-device IRQ\n");
2463 goto err_free;
2464 }
2465
2466 err = devm_request_irq(&pdev->dev, dev->irq, priv->r->net_irq,
2467 IRQF_SHARED, dev->name, dev);
2468 if (err) {
2469 dev_err(&pdev->dev, "%s: could not acquire interrupt: %d\n",
2470 __func__, err);
2471 goto err_free;
2472 }
2473
2474 rtl8380_init_mac(priv);
2475
2476 /* Try to get mac address in the following order:
2477 * 1) from device tree data
2478 * 2) from internal registers set by bootloader
2479 */
2480 of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
2481 if (is_valid_ether_addr(dev->dev_addr)) {
2482 rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
2483 } else {
2484 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
2485 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
2486 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
2487 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
2488 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
2489 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
2490 }
2491 /* if the address is invalid, use a random value */
2492 if (!is_valid_ether_addr(dev->dev_addr)) {
2493 struct sockaddr sa = { AF_UNSPEC };
2494
2495 netdev_warn(dev, "Invalid MAC address, using random\n");
2496 eth_hw_addr_random(dev);
2497 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
2498 if (rtl838x_set_mac_address(dev, &sa))
2499 netdev_warn(dev, "Failed to set MAC address.\n");
2500 }
2501 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
2502 sw_r32(priv->r->mac + 4));
2503 strcpy(dev->name, "eth%d");
2504 priv->pdev = pdev;
2505 priv->netdev = dev;
2506
2507 err = rtl838x_mdio_init(priv);
2508 if (err)
2509 goto err_free;
2510
2511 err = register_netdev(dev);
2512 if (err)
2513 goto err_free;
2514
2515 for (int i = 0; i < priv->rxrings; i++) {
2516 priv->rx_qs[i].id = i;
2517 priv->rx_qs[i].priv = priv;
2518 netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
2519 }
2520
2521 platform_set_drvdata(pdev, dev);
2522
2523 phy_mode = PHY_INTERFACE_MODE_NA;
2524 err = of_get_phy_mode(dn, &phy_mode);
2525 if (err < 0) {
2526 dev_err(&pdev->dev, "incorrect phy-mode\n");
2527 err = -EINVAL;
2528 goto err_free;
2529 }
2530 priv->phylink_config.dev = &dev->dev;
2531 priv->phylink_config.type = PHYLINK_NETDEV;
2532
2533 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
2534 phy_mode, &rtl838x_phylink_ops);
2535
2536 if (IS_ERR(phylink)) {
2537 err = PTR_ERR(phylink);
2538 goto err_free;
2539 }
2540 priv->phylink = phylink;
2541
2542 return 0;
2543
2544 err_free:
2545 pr_err("Error setting up netdev, freeing it again.\n");
2546 free_netdev(dev);
2547
2548 return err;
2549 }
2550
2551 static int rtl838x_eth_remove(struct platform_device *pdev)
2552 {
2553 struct net_device *dev = platform_get_drvdata(pdev);
2554 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2555
2556 if (dev) {
2557 pr_info("Removing platform driver for rtl838x-eth\n");
2558 rtl838x_mdio_remove(priv);
2559 rtl838x_hw_stop(priv);
2560
2561 netif_tx_stop_all_queues(dev);
2562
2563 for (int i = 0; i < priv->rxrings; i++)
2564 netif_napi_del(&priv->rx_qs[i].napi);
2565
2566 unregister_netdev(dev);
2567 free_netdev(dev);
2568 }
2569
2570 return 0;
2571 }
2572
/* Device-tree compatible strings this driver binds to */
static const struct of_device_id rtl838x_eth_of_ids[] = {
	{ .compatible = "realtek,rtl838x-eth"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
2578
/* Platform-driver glue; no power-management callbacks are provided */
static struct platform_driver rtl838x_eth_driver = {
	.probe = rtl838x_eth_probe,
	.remove = rtl838x_eth_remove,
	.driver = {
		.name = "rtl838x-eth",
		.pm = NULL,
		.of_match_table = rtl838x_eth_of_ids,
	},
};
2588
/* Standard module registration and metadata */
module_platform_driver(rtl838x_eth_driver);

MODULE_AUTHOR("B. Koblitz");
MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
MODULE_LICENSE("GPL");