realtek: reset both ethernet NIC and queues
[openwrt/staging/nbd.git] / target / linux / realtek / files-5.10 / drivers / net / ethernet / rtl838x_eth.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/drivers/net/ethernet/rtl838x_eth.c
4 * Copyright (C) 2020 B. Koblitz
5 */
6
7 #include <linux/dma-mapping.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/platform_device.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/of.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/module.h>
18 #include <linux/phylink.h>
19 #include <linux/pkt_sched.h>
20 #include <net/dsa.h>
21 #include <net/switchdev.h>
22 #include <asm/cacheflush.h>
23
24 #include <asm/mach-rtl838x/mach-rtl83xx.h>
25 #include "rtl838x_eth.h"
26
27 extern struct rtl83xx_soc_info soc_info;
28
/*
 * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
 * The ring is assigned by switch based on packet/port priority
 * Maximum number of TX rings is 2, Ring 2 being the high priority
 * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
 * for an RX ring, MAX_ENTRIES the maximum number of entries
 * available in total for all queues.
 */
#define MAX_RXRINGS 32
#define MAX_RXLEN 200
#define MAX_ENTRIES (200 * 8)
#define TXRINGS 2
#define TXRINGLEN 160
#define NOTIFY_EVENTS 10	/* L2 FDB events per notification block (RTL839x) */
#define NOTIFY_BLOCKS 10	/* entries in the L2 notification ring (RTL839x) */
#define TX_EN 0x8		/* DMA_IF_CTRL TX enable, RTL83xx */
#define RX_EN 0x4		/* DMA_IF_CTRL RX enable, RTL83xx */
#define TX_EN_93XX 0x20		/* DMA_IF_CTRL TX enable, RTL93xx */
#define RX_EN_93XX 0x10		/* DMA_IF_CTRL RX enable, RTL93xx */
#define TX_DO 0x2
#define WRAP 0x2		/* ring descriptor: last entry, wrap around */
#define MAX_PORTS 57
#define MAX_SMI_BUSSES 4

#define RING_BUFFER 1600	/* size of one RX/TX packet buffer in bytes */
54
/*
 * Packet descriptor shared with the switch DMA engine: one p_hdr per
 * RX/TX ring entry. Its (KSEG1) address is stored in the ring word that
 * is handed to the ASIC, hence the packed, unpadded layout.
 */
struct p_hdr {
	uint8_t *buf;		/* pointer to the packet data buffer */
	uint16_t reserved;
	uint16_t size;		/* buffer size */
	uint16_t offset;
	uint16_t len;		/* pkt len */
	uint16_t cpu_tag[10];	/* CPU TX/RX tag; layout is SoC-family specific */
} __packed __aligned(1);
63
/*
 * One L2 FDB notification event as written by the RTL839x into a
 * notify_block (consumed by rtl839x_l2_notification_handler).
 */
struct n_event {
	uint32_t type:2;	/* non-zero: address learned (add), 0: aged out (del) */
	uint32_t fidVid:12;
	uint64_t mac:48;
	uint32_t slp:6;
	uint32_t valid:1;	/* entry carries a usable event */
	uint32_t reserved:27;
} __packed __aligned(1);
72
/*
 * All DMA ring state, laid out contiguously at priv->membase so the
 * ASIC and the driver share one memory block.
 */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];	/* RX descriptor words (p_hdr addr | flags) */
	uint32_t tx_r[TXRINGS][TXRINGLEN];	/* TX descriptor words */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];
	uint32_t c_rx[MAX_RXRINGS];		/* current (next) RX index per ring */
	uint32_t c_tx[TXRINGS];			/* current (next) TX index per ring */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];	/* TX packet buffers */
	uint8_t *rx_space;			/* separately allocated RX packet buffers */
};
83
/* One entry of the RTL839x L2 notification ring: a batch of FDB events */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};
87
/*
 * RTL839x L2 notification ring, placed directly after struct ring_b in
 * priv->membase (see rtl839x_setup_notify_ring_buffer).
 */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];	/* descriptor words: block addr | own bit */
	u32 reserved2[8];
};
94
95 static void rtl838x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
96 {
97 prio &= 0x7;
98
99 if (dest_port > 0) {
100 // cpu_tag[0] is reserved on the RTL83XX SoCs
101 h->cpu_tag[1] = 0x0401; // BIT 10: RTL8380_CPU_TAG, BIT0: L2LEARNING on
102 h->cpu_tag[2] = 0x0200; // Set only AS_DPM, to enable DPM settings below
103 h->cpu_tag[3] = 0x0000;
104 h->cpu_tag[4] = BIT(dest_port) >> 16;
105 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
106 // Set internal priority and AS_PRIO
107 if (prio >= 0)
108 h->cpu_tag[2] |= (prio | 0x8) << 12;
109 }
110 }
111
112 static void rtl839x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
113 {
114 prio &= 0x7;
115
116 if (dest_port > 0) {
117 // cpu_tag[0] is reserved on the RTL83XX SoCs
118 h->cpu_tag[1] = 0x0100; // RTL8390_CPU_TAG marker
119 h->cpu_tag[2] = h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
120 // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2
121 if (dest_port >= 32) {
122 dest_port -= 32;
123 h->cpu_tag[2] = BIT(dest_port) >> 16;
124 h->cpu_tag[3] = BIT(dest_port) & 0xffff;
125 } else {
126 h->cpu_tag[4] = BIT(dest_port) >> 16;
127 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
128 }
129 h->cpu_tag[2] |= BIT(20); // Enable destination port mask use
130 h->cpu_tag[2] |= BIT(23); // Enable L2 Learning
131 // Set internal priority and AS_PRIO
132 if (prio >= 0)
133 h->cpu_tag[1] |= prio | BIT(3);
134 }
135 }
136
137 static void rtl930x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
138 {
139 h->cpu_tag[0] = 0x8000; // CPU tag marker
140 h->cpu_tag[1] = h->cpu_tag[2] = 0;
141 if (prio >= 0)
142 h->cpu_tag[2] = BIT(13) | prio << 8; // Enable and set Priority Queue
143 h->cpu_tag[3] = 0;
144 h->cpu_tag[4] = 0;
145 h->cpu_tag[5] = 0;
146 h->cpu_tag[6] = BIT(dest_port) >> 16;
147 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
148 }
149
150 static void rtl931x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
151 {
152 h->cpu_tag[0] = 0x8000; // CPU tag marker
153 h->cpu_tag[1] = h->cpu_tag[2] = 0;
154 if (prio >= 0)
155 h->cpu_tag[2] = BIT(13) | prio << 8; // Enable and set Priority Queue
156 h->cpu_tag[3] = 0;
157 h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
158 if (dest_port >= 32) {
159 dest_port -= 32;
160 h->cpu_tag[4] = BIT(dest_port) >> 16;
161 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
162 } else {
163 h->cpu_tag[6] = BIT(dest_port) >> 16;
164 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
165 }
166 }
167
/* Per-RX-ring NAPI context; one instance per hardware RX queue */
struct rtl838x_rx_q {
	int id;				/* ring number */
	struct rtl838x_eth_priv *priv;
	struct napi_struct napi;
};
173
/* Driver private state, stored in netdev_priv() of the CPU port netdev */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;			/* shared DMA area: struct ring_b (+ notify_b on 839x) */
	spinlock_t lock;		/* serializes ISR and open/stop register access */
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;			/* RTL8380/8390/9300/9310_FAMILY_ID */
	const struct rtl838x_reg *r;	/* per-family register/accessor table */
	u8 cpu_port;
	u32 lastEvent;			/* next L2 notification ring entry to consume */
	u16 rxrings;			/* number of RX rings actually used */
	u16 rxringlen;			/* entries per RX ring */
	u8 smi_bus[MAX_PORTS];
	u8 smi_addr[MAX_PORTS];
	bool smi_bus_isc45[MAX_SMI_BUSSES];
	bool phy_is_internal[MAX_PORTS];
};
195
196 extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
197 extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
198 extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
199 extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
200 extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
201 extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
202 extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
203 extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
204
/*
 * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of
 * the rings. Writing x into these registers subtracts x from its content.
 * When the content reaches the ring size, the ASIC no longer adds
 * packets to this receive queue.
 */
void rtl838x_update_cntr(int r, int released)
{
	// This feature is not available on RTL838x SoCs
}
215
/* See comment above rtl838x_update_cntr */
void rtl839x_update_cntr(int r, int released)
{
	// This feature is not available on RTL839x SoCs
}
220
/*
 * Return `released` buffers of RX ring r to the ASIC by subtracting
 * from the ring's fill counter (3 counters of 10 bits per register).
 */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;	/* current fill level of ring r */
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/*
	 * NOTE(review): this extra full-register write stores the OLD fill
	 * level without shifting it back to `pos`, unlike
	 * rtl931x_update_cntr which only does the masked write above.
	 * Looks wrong for r % 3 != 0 — confirm against the RTL930x
	 * reference before changing.
	 */
	sw_w32(v, reg);
}
232
233 void rtl931x_update_cntr(int r, int released)
234 {
235 int pos = (r % 3) * 10;
236 u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
237
238 sw_w32_mask(0x3ff << pos, released << pos, reg);
239 }
240
/* Decoded CPU RX tag, filled in by the per-family decode_tag helpers */
struct dsa_tag {
	u8 reason;		/* trap/forwarding reason code from the ASIC */
	u8 queue;		/* RX queue the packet arrived on */
	u16 port;		/* ingress switch port */
	u8 l2_offloaded;	/* 1: forwarding already handled in hardware */
	u8 prio;
	bool crc_error;
};
249
250 bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
251 {
252 t->reason = h->cpu_tag[3] & 0xf;
253 t->queue = (h->cpu_tag[0] & 0xe0) >> 5;
254 t->port = h->cpu_tag[1] & 0x1f;
255 t->crc_error = t->reason == 13;
256
257 pr_debug("Reason: %d\n", t->reason);
258 if (t->reason != 4) // NIC_RX_REASON_SPECIAL_TRAP
259 t->l2_offloaded = 1;
260 else
261 t->l2_offloaded = 0;
262
263 return t->l2_offloaded;
264 }
265
266 bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
267 {
268 t->reason = h->cpu_tag[4] & 0x1f;
269 t->queue = (h->cpu_tag[3] & 0xe000) >> 13;
270 t->port = h->cpu_tag[1] & 0x3f;
271 t->crc_error = h->cpu_tag[3] & BIT(2);
272
273 pr_debug("Reason: %d\n", t->reason);
274 if ((t->reason != 7) && (t->reason != 8)) // NIC_RX_REASON_RMA_USR
275 t->l2_offloaded = 1;
276 else
277 t->l2_offloaded = 0;
278
279 return t->l2_offloaded;
280 }
281
282 bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
283 {
284 t->reason = h->cpu_tag[7] & 0x3f;
285 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
286 t->port = (h->cpu_tag[0] >> 8) & 0x1f;
287 t->crc_error = h->cpu_tag[1] & BIT(6);
288
289 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
290 if (t->reason >= 19 && t->reason <= 27)
291 t->l2_offloaded = 0;
292 else
293 t->l2_offloaded = 1;
294
295 return t->l2_offloaded;
296 }
297
298 bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
299 {
300 t->reason = h->cpu_tag[7] & 0x3f;
301 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
302 t->port = (h->cpu_tag[0] >> 8) & 0x3f;
303 t->crc_error = h->cpu_tag[1] & BIT(6);
304
305 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
306 if (t->reason >= 19 && t->reason <= 27)
307 t->l2_offloaded = 0;
308 else
309 t->l2_offloaded = 1;
310
311 return t->l2_offloaded;
312 }
313
314 /*
315 * Discard the RX ring-buffers, called as part of the net-ISR
316 * when the buffer runs over
317 * Caller needs to hold priv->lock
318 */
319 static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
320 {
321 int r;
322 u32 *last;
323 struct p_hdr *h;
324 struct ring_b *ring = priv->membase;
325
326 for (r = 0; r < priv->rxrings; r++) {
327 pr_debug("In %s working on r: %d\n", __func__, r);
328 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
329 do {
330 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
331 break;
332 pr_debug("Got something: %d\n", ring->c_rx[r]);
333 h = &ring->rx_header[r][ring->c_rx[r]];
334 memset(h, 0, sizeof(struct p_hdr));
335 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
336 + r * priv->rxringlen * RING_BUFFER
337 + ring->c_rx[r] * RING_BUFFER);
338 h->size = RING_BUFFER;
339 /* make sure the header is visible to the ASIC */
340 mb();
341
342 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
343 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
344 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
345 } while (&ring->rx_r[r][ring->c_rx[r]] != last);
346 }
347 }
348
/*
 * Deferred FDB notification: macs[] is a 0-terminated list of events,
 * bit 63 of each entry encodes add (1) vs. delete (0).
 */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];	/* +1 for the terminating 0 entry */
};
354
355 void rtl838x_fdb_sync(struct work_struct *work)
356 {
357 const struct fdb_update_work *uw =
358 container_of(work, struct fdb_update_work, work);
359 struct switchdev_notifier_fdb_info info;
360 u8 addr[ETH_ALEN];
361 int i = 0;
362 int action;
363
364 while (uw->macs[i]) {
365 action = (uw->macs[i] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
366 : SWITCHDEV_FDB_DEL_TO_BRIDGE;
367 u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
368 info.addr = &addr[0];
369 info.vid = 0;
370 info.offloaded = 1;
371 pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
372 call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
373 i++;
374 }
375 kfree(work);
376 }
377
/*
 * Drain the RTL839x L2 notification ring. Runs in IRQ context with
 * priv->lock held, hence GFP_ATOMIC; the actual switchdev notification
 * is deferred to a workqueue via rtl838x_fdb_sync.
 */
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
	/* Notification ring lives directly after the DMA rings */
	struct notify_b *nb = priv->membase + sizeof(struct ring_b);
	u32 e = priv->lastEvent;
	struct n_event *event;
	int i;
	u64 mac;
	struct fdb_update_work *w;

	/* Bit 0 clear: block e has been filled by the ASIC */
	while (!(nb->ring[e] & 1)) {
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w) {
			pr_err("Out of memory: %s", __func__);
			return;
		}
		INIT_WORK(&w->work, rtl838x_fdb_sync);

		for (i = 0; i < NOTIFY_EVENTS; i++) {
			event = &nb->blocks[e].events[i];
			if (!event->valid)
				continue;
			mac = event->mac;
			/* Encode add vs. delete in bit 63 for fdb_sync */
			if (event->type)
				mac |= 1ULL << 63;
			w->ndev = priv->netdev;
			w->macs[i] = mac;
		}
		/*
		 * NOTE(review): invalid events leave zero gaps in macs[],
		 * and fdb_sync stops at the first zero — events after a
		 * gap are dropped. Confirm whether valid events are always
		 * contiguous in a block.
		 */

		/* Hand the ring entry back to the switch */
		nb->ring[e] = nb->ring[e] | 1;
		e = (e + 1) % NOTIFY_BLOCKS;

		w->macs[i] = 0ULL;	/* terminate the list (i == NOTIFY_EVENTS here) */
		schedule_work(&w->work);
	}
	priv->lastEvent = e;
}
415
/*
 * Interrupt handler for the RTL838x/RTL839x families. Status layout:
 * bits 16-19 TX done, bits 8-15 RX done (per ring), bits 0-7 RX ring
 * overrun, bits 20-22 L2 notification events (RTL839x only).
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);
	int i;

	pr_debug("IRQ: %08x\n", status);

	spin_lock(&priv->lock);
	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring; NAPI poll re-enables it */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun: recycle all consumed entries back to the ASIC */
	if (status & 0x000ff) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* RTL839x L2 notification events (bits 20, 21, 22) */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}
471
/*
 * Interrupt handler for the RTL930x/RTL931x families, which use
 * separate status/mask registers for TX done, RX done and RX runout.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
	int i;

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
		 __func__, status_tx, status_rx, status_rx_r);
	spin_lock(&priv->lock);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt: one status bit per ring */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings; NAPI poll re-enables */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun: recycle consumed entries back to the ASIC */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}
517
/* Register addresses and accessors for the RTL838x family (used via priv->r) */
static const struct rtl838x_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
542
/* Register addresses and accessors for the RTL839x family (used via priv->r) */
static const struct rtl838x_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
567
/* Register addresses and accessors for the RTL930x family (used via priv->r) */
static const struct rtl838x_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
598
/* Register addresses and accessors for the RTL931x family (used via priv->r) */
static const struct rtl838x_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
629
/*
 * Reset the ethernet NIC and its queues: stop CPU-port traffic,
 * disable/clear all DMA interrupts, pulse the SW_NIC_RST and SW_Q_RST
 * bits in the global reset register, then re-initialize the
 * head-of-line settings. On RTL839x, L2 notification and NBUF state
 * survives the reset via save/restore.
 */
static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
{
	u32 int_saved, nbuf;
	u32 reset_mask;
	int i, pos;

	pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
	/* Stop TX/RX on the CPU port before resetting */
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
	mdelay(100);

	/* Disable and clear interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	if (priv->family_id == RTL8390_FAMILY_ID) {
		/* Preserve L2 notification and NBUF settings */
		int_saved = sw_r32(priv->r->dma_if_intr_msk);
		nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);

		/* Disable link change interrupt on RTL839x */
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	/* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST); bit positions differ per family */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		reset_mask = 0x6;
	else
		reset_mask = 0xc;

	sw_w32(reset_mask, priv->r->rst_glb_ctrl);

	do { /* Wait for reset of NIC and Queues done: bits self-clear */
		udelay(20);
	} while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask);
	mdelay(100);

	/* Setup Head of Line */
	if (priv->family_id == RTL8380_FAMILY_ID)
		sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); // Disabled on RTL8380
	if (priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
	if (priv->family_id == RTL9300_FAMILY_ID) {
		/* 3 ring fields of 10 bits per 32-bit register */
		for (i = 0; i < priv->rxrings; i++) {
			pos = (i % 3) * 10;
			sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
			sw_w32_mask(0x3ff << pos, priv->rxringlen,
				    priv->r->dma_if_rx_ring_cntr(i));
		}
	}

	/* Re-enable link change interrupt */
	if (priv->family_id == RTL8390_FAMILY_ID) {
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		/* Restore notification settings: on RTL838x these bits are null */
		sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
		sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	}
}
705
706 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
707 {
708 int i;
709 struct ring_b *ring = priv->membase;
710
711 for (i = 0; i < priv->rxrings; i++)
712 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
713
714 for (i = 0; i < TXRINGS; i++)
715 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
716 }
717
/* Enable RX/TX DMA and CPU-port traffic on the RTL838x family */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
744
/* Enable RX/TX DMA and CPU-port traffic on the RTL839x family */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ!

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask */
	// TODO: The code below should also work for the RTL838x
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
768
/* Enable RX/TX DMA and CPU-port traffic on the RTL930x/931x families */
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	int i, pos;
	u32 v;

	/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
	sw_w32(0x06400040, priv->r->dma_if_ctrl);

	for (i = 0; i < priv->rxrings; i++) {
		/* 3 ring fields of 10 bits per 32-bit register */
		pos = (i % 3) * 10;
		sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));

		// Some SoCs have issues with missing underflow protection
		/* Re-write the current fill level back into the counter field */
		v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
		sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
	}

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
	sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);

	/* Enable DMA */
	sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins the unknown-unicast flooding portmask */
	sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
	sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
800
801 static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
802 {
803 int i, j;
804
805 struct p_hdr *h;
806
807 for (i = 0; i < priv->rxrings; i++) {
808 for (j = 0; j < priv->rxringlen; j++) {
809 h = &ring->rx_header[i][j];
810 memset(h, 0, sizeof(struct p_hdr));
811 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
812 + i * priv->rxringlen * RING_BUFFER
813 + j * RING_BUFFER);
814 h->size = RING_BUFFER;
815 /* All rings owned by switch, last one wraps */
816 ring->rx_r[i][j] = KSEG1ADDR(h) | 1
817 | (j == (priv->rxringlen - 1) ? WRAP : 0);
818 }
819 ring->c_rx[i] = 0;
820 }
821
822 for (i = 0; i < TXRINGS; i++) {
823 for (j = 0; j < TXRINGLEN; j++) {
824 h = &ring->tx_header[i][j];
825 memset(h, 0, sizeof(struct p_hdr));
826 h->buf = (u8 *)KSEG1ADDR(ring->tx_space
827 + i * TXRINGLEN * RING_BUFFER
828 + j * RING_BUFFER);
829 h->size = RING_BUFFER;
830 ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
831 }
832 /* Last header is wrapping around */
833 ring->tx_r[i][j-1] |= WRAP;
834 ring->c_tx[i] = 0;
835 }
836 }
837
/*
 * Initialize the RTL839x L2 notification ring (placed after struct
 * ring_b in priv->membase) and enable FDB change notifications.
 */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	int i;
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	/* All blocks owned by the switch (bit 0), last one wraps */
	for (i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	/* Notification threshold */
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;	/* start consuming at ring entry 0 */
}
857
858 static int rtl838x_eth_open(struct net_device *ndev)
859 {
860 unsigned long flags;
861 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
862 struct ring_b *ring = priv->membase;
863 int i, err;
864
865 pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
866 __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);
867
868 spin_lock_irqsave(&priv->lock, flags);
869 rtl838x_hw_reset(priv);
870 rtl838x_setup_ring_buffer(priv, ring);
871 if (priv->family_id == RTL8390_FAMILY_ID) {
872 rtl839x_setup_notify_ring_buffer(priv);
873 /* Make sure the ring structure is visible to the ASIC */
874 mb();
875 flush_cache_all();
876 }
877
878 rtl838x_hw_ring_setup(priv);
879 err = request_irq(ndev->irq, priv->r->net_irq, IRQF_SHARED, ndev->name, ndev);
880 if (err) {
881 netdev_err(ndev, "%s: could not acquire interrupt: %d\n",
882 __func__, err);
883 return err;
884 }
885 phylink_start(priv->phylink);
886
887 for (i = 0; i < priv->rxrings; i++)
888 napi_enable(&priv->rx_qs[i].napi);
889
890 switch (priv->family_id) {
891 case RTL8380_FAMILY_ID:
892 rtl838x_hw_en_rxtx(priv);
893 /* Trap IGMP/MLD traffic to CPU-Port */
894 sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
895 /* Flush learned FDB entries on link down of a port */
896 sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
897 break;
898
899 case RTL8390_FAMILY_ID:
900 rtl839x_hw_en_rxtx(priv);
901 // Trap MLD and IGMP messages to CPU_PORT
902 sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
903 /* Flush learned FDB entries on link down of a port */
904 sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
905 break;
906
907 case RTL9300_FAMILY_ID:
908 rtl93xx_hw_en_rxtx(priv);
909 /* Flush learned FDB entries on link down of a port */
910 sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
911 // Trap MLD and IGMP messages to CPU_PORT
912 sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
913 break;
914
915 case RTL9310_FAMILY_ID:
916 rtl93xx_hw_en_rxtx(priv);
917 break;
918 }
919
920 netif_tx_start_all_queues(ndev);
921
922 spin_unlock_irqrestore(&priv->lock, flags);
923
924 return 0;
925 }
926
/*
 * Quiesce the NIC: stop CPU-port traffic, disable DMA, block and flush
 * the L2 tables, force the CPU port link down and mask all interrupts.
 */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* Per-family force-mode value with the link bits cleared */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
	int i;

	// Disable RX/TX from/to CPU-port
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); // Test, whether this is needed

	/* Block all ports */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache: one flush operation per port */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			/* Busy-wait until the flush-execute bit self-clears */
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	// TODO: L2 flush register is 64 bit on RTL931X and 930X

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(100);

	/* Disable all TX/RX interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
988
989 static int rtl838x_eth_stop(struct net_device *ndev)
990 {
991 unsigned long flags;
992 int i;
993 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
994
995 pr_info("in %s\n", __func__);
996
997 spin_lock_irqsave(&priv->lock, flags);
998 phylink_stop(priv->phylink);
999 rtl838x_hw_stop(priv);
1000 free_irq(ndev->irq, ndev);
1001
1002 for (i = 0; i < priv->rxrings; i++)
1003 napi_disable(&priv->rx_qs[i].napi);
1004
1005 netif_tx_stop_all_queues(ndev);
1006
1007 spin_unlock_irqrestore(&priv->lock, flags);
1008
1009 return 0;
1010 }
1011
1012 static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
1013 {
1014 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1015 sw_w32(0x0, RTL839X_RMA_CTRL_0);
1016 sw_w32(0x0, RTL839X_RMA_CTRL_1);
1017 sw_w32(0x0, RTL839X_RMA_CTRL_2);
1018 sw_w32(0x0, RTL839X_RMA_CTRL_3);
1019 }
1020 if (ndev->flags & IFF_ALLMULTI) {
1021 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1022 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1023 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1024 }
1025 if (ndev->flags & IFF_PROMISC) {
1026 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1027 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1028 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1029 sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
1030 }
1031 }
1032
1033 static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
1034 {
1035 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1036
1037 if (priv->family_id == RTL8390_FAMILY_ID)
1038 return rtl839x_eth_set_multicast_list(ndev);
1039
1040 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1041 sw_w32(0x0, RTL838X_RMA_CTRL_0);
1042 sw_w32(0x0, RTL838X_RMA_CTRL_1);
1043 }
1044 if (ndev->flags & IFF_ALLMULTI)
1045 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1046 if (ndev->flags & IFF_PROMISC) {
1047 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1048 sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
1049 }
1050 }
1051
1052 static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
1053 {
1054 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1055 sw_w32(0x0, RTL930X_RMA_CTRL_0);
1056 sw_w32(0x0, RTL930X_RMA_CTRL_1);
1057 sw_w32(0x0, RTL930X_RMA_CTRL_2);
1058 }
1059 if (ndev->flags & IFF_ALLMULTI) {
1060 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1061 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1062 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1063 }
1064 if (ndev->flags & IFF_PROMISC) {
1065 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1066 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1067 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1068 }
1069 }
1070
1071 static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
1072 {
1073 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1074 sw_w32(0x0, RTL931X_RMA_CTRL_0);
1075 sw_w32(0x0, RTL931X_RMA_CTRL_1);
1076 sw_w32(0x0, RTL931X_RMA_CTRL_2);
1077 }
1078 if (ndev->flags & IFF_ALLMULTI) {
1079 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1080 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1081 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1082 }
1083 if (ndev->flags & IFF_PROMISC) {
1084 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1085 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1086 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1087 }
1088 }
1089
1090 static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1091 {
1092 unsigned long flags;
1093 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1094
1095 pr_warn("%s\n", __func__);
1096 spin_lock_irqsave(&priv->lock, flags);
1097 rtl838x_hw_stop(priv);
1098 rtl838x_hw_ring_setup(priv);
1099 rtl838x_hw_en_rxtx(priv);
1100 netif_trans_update(ndev);
1101 netif_start_queue(ndev);
1102 spin_unlock_irqrestore(&priv->lock, flags);
1103 }
1104
/* ndo_start_xmit: transmit one skb on the CPU port.
 * Picks the TX ring from the skb's queue mapping, copies the frame into
 * the ring's DMA buffer and hands the descriptor to the switch.
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY when the switch still owns the
 * next descriptor.
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len, i;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	uint32_t val;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	/* -1 means: no trailer tag found, let the switch pick the egress port */
	int dest_port = -1;
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) // Check for high prio queue
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80 && skb->data[len-3] > 0
			&& skb->data[len-3] < priv->cpu_port && skb->data[len-2] == 0x10
			&& skb->data[len-1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len-3];
		skb->data[len-4] = skb->data[len-3] = skb->data[len-2] = skb->data[len-1] = 0x00;
		len -= 4;
	}

	len += 4; // Add space for CRC

	if (skb_padto(skb, len)) {
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {

		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		// On RTL8380 SoCs, small packet lengths being sent need adjustments
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch: the owner bit must be set only after
		 * the payload and header writes above are complete
		 */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		// Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
		if (priv->family_id == RTL8380_FAMILY_ID) {
			for (i = 0; i < 10; i++) {
				val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID
			|| priv->family_id == RTL9300_FAMILY_ID) {
			// Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}
txdone:
	spin_unlock_irqrestore(&priv->lock, flags);
	return ret;
}
1197
1198 /*
1199 * Return queue number for TX. On the RTL83XX, these queues have equal priority
1200 * so we do round-robin
1201 */
1202 u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1203 struct net_device *sb_dev)
1204 {
1205 static u8 last = 0;
1206
1207 last++;
1208 return last % TXRINGS;
1209 }
1210
1211 /*
1212 * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1213 */
1214 u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1215 struct net_device *sb_dev)
1216 {
1217 if (skb->priority >= TC_PRIO_CONTROL)
1218 return 1;
1219 return 0;
1220 }
1221
1222 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
1223 {
1224 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1225 struct ring_b *ring = priv->membase;
1226 struct sk_buff *skb;
1227 unsigned long flags;
1228 int i, len, work_done = 0;
1229 u8 *data, *skb_data;
1230 unsigned int val;
1231 u32 *last;
1232 struct p_hdr *h;
1233 bool dsa = netdev_uses_dsa(dev);
1234 struct dsa_tag tag;
1235
1236 spin_lock_irqsave(&priv->lock, flags);
1237 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1238 pr_debug("---------------------------------------------------------- RX - %d\n", r);
1239
1240 do {
1241 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
1242 if (&ring->rx_r[r][ring->c_rx[r]] != last) {
1243 netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
1244 r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
1245 }
1246 break;
1247 }
1248
1249 h = &ring->rx_header[r][ring->c_rx[r]];
1250 data = (u8 *)KSEG1ADDR(h->buf);
1251 len = h->len;
1252 if (!len)
1253 break;
1254 work_done++;
1255
1256 len -= 4; /* strip the CRC */
1257 /* Add 4 bytes for cpu_tag */
1258 if (dsa)
1259 len += 4;
1260
1261 skb = alloc_skb(len + 4, GFP_KERNEL);
1262 skb_reserve(skb, NET_IP_ALIGN);
1263
1264 if (likely(skb)) {
1265 /* BUG: Prevent bug on RTL838x SoCs*/
1266 if (priv->family_id == RTL8380_FAMILY_ID) {
1267 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
1268 for (i = 0; i < priv->rxrings; i++) {
1269 /* Update each ring cnt */
1270 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
1271 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
1272 }
1273 }
1274
1275 skb_data = skb_put(skb, len);
1276 /* Make sure data is visible */
1277 mb();
1278 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
1279 /* Overwrite CRC with cpu_tag */
1280 if (dsa) {
1281 priv->r->decode_tag(h, &tag);
1282 skb->data[len-4] = 0x80;
1283 skb->data[len-3] = tag.port;
1284 skb->data[len-2] = 0x10;
1285 skb->data[len-1] = 0x00;
1286 if (tag.l2_offloaded)
1287 skb->data[len-3] |= 0x40;
1288 }
1289
1290 if (tag.queue >= 0)
1291 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1292 tag.queue, len, tag.reason, tag.port);
1293
1294 skb->protocol = eth_type_trans(skb, dev);
1295 if (dev->features & NETIF_F_RXCSUM) {
1296 if (tag.crc_error)
1297 skb_checksum_none_assert(skb);
1298 else
1299 skb->ip_summed = CHECKSUM_UNNECESSARY;
1300 }
1301 dev->stats.rx_packets++;
1302 dev->stats.rx_bytes += len;
1303
1304 netif_receive_skb(skb);
1305 } else {
1306 if (net_ratelimit())
1307 dev_warn(&dev->dev, "low on memory - packet dropped\n");
1308 dev->stats.rx_dropped++;
1309 }
1310
1311 /* Reset header structure */
1312 memset(h, 0, sizeof(struct p_hdr));
1313 h->buf = data;
1314 h->size = RING_BUFFER;
1315
1316 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
1317 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
1318 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
1319 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1320 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
1321
1322 // Update counters
1323 priv->r->update_cntr(r, 0);
1324
1325 spin_unlock_irqrestore(&priv->lock, flags);
1326 return work_done;
1327 }
1328
/* NAPI poll handler for one RX ring: process up to budget packets and
 * re-enable the ring's RX interrupt once the ring has been drained.
 */
static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
{
	struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
	struct rtl838x_eth_priv *priv = rx_q->priv;
	int work_done = 0;
	int r = rx_q->id;
	int work;

	/* Keep receiving until the ring is empty or the budget is used up */
	while (work_done < budget) {
		work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
		if (!work)
			break;
		work_done += work;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Enable RX interrupt */
		if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
			sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
		else
			/* NOTE(review): 0xf00ff enables more than this ring's
			 * done bit (BIT(r + 8)) — presumably also runout/TX
			 * bits; verify against the DMA_IF_INTR layout
			 */
			sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
	}
	return work_done;
}
1355
1356
1357 static void rtl838x_validate(struct phylink_config *config,
1358 unsigned long *supported,
1359 struct phylink_link_state *state)
1360 {
1361 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1362
1363 pr_debug("In %s\n", __func__);
1364
1365 if (!phy_interface_mode_is_rgmii(state->interface) &&
1366 state->interface != PHY_INTERFACE_MODE_1000BASEX &&
1367 state->interface != PHY_INTERFACE_MODE_MII &&
1368 state->interface != PHY_INTERFACE_MODE_REVMII &&
1369 state->interface != PHY_INTERFACE_MODE_GMII &&
1370 state->interface != PHY_INTERFACE_MODE_QSGMII &&
1371 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
1372 state->interface != PHY_INTERFACE_MODE_SGMII) {
1373 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1374 pr_err("Unsupported interface: %d\n", state->interface);
1375 return;
1376 }
1377
1378 /* Allow all the expected bits */
1379 phylink_set(mask, Autoneg);
1380 phylink_set_port_modes(mask);
1381 phylink_set(mask, Pause);
1382 phylink_set(mask, Asym_Pause);
1383
1384 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1385 * including Half duplex
1386 */
1387 if (state->interface != PHY_INTERFACE_MODE_MII &&
1388 state->interface != PHY_INTERFACE_MODE_REVMII) {
1389 phylink_set(mask, 1000baseT_Full);
1390 phylink_set(mask, 1000baseT_Half);
1391 }
1392
1393 phylink_set(mask, 10baseT_Half);
1394 phylink_set(mask, 10baseT_Full);
1395 phylink_set(mask, 100baseT_Half);
1396 phylink_set(mask, 100baseT_Full);
1397
1398 bitmap_and(supported, supported, mask,
1399 __ETHTOOL_LINK_MODE_MASK_NBITS);
1400 bitmap_and(state->advertising, state->advertising, mask,
1401 __ETHTOOL_LINK_MODE_MASK_NBITS);
1402 }
1403
1404
/* phylink mac_config callback.
 * Only called for the master device, i.e. the CPU port — nothing to do.
 * Demoted the unconditional pr_info() to pr_debug() for consistency with
 * the other phylink callbacks and to avoid log spam on reconfiguration.
 */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	pr_debug("In %s, mode %x\n", __func__, mode);
}
1415
/* phylink mac_an_restart callback: restart autonegotiation on the CPU port
 * by bouncing the forced-link state. Only implemented for RTL838x.
 */
static void rtl838x_mac_an_restart(struct phylink_config *config)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	/* This works only on RTL838x chips */
	if (priv->family_id != RTL8380_FAMILY_ID)
		return;

	pr_debug("In %s\n", __func__);
	/* Restart by disabling and re-enabling link */
	/* NOTE(review): 0x6192D/0x6192F differ only in the low bits — verify
	 * against the MAC_FORCE_MODE_CTRL register layout (cf. 0x6192C used
	 * by rtl838x_hw_stop() to force the link down)
	 */
	sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(20);
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
1431
1432 static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
1433 struct phylink_link_state *state)
1434 {
1435 u32 speed;
1436 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1437 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1438 int port = priv->cpu_port;
1439
1440 pr_debug("In %s\n", __func__);
1441
1442 state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
1443 state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
1444
1445 speed = priv->r->get_mac_link_spd_sts(port);
1446 switch (speed) {
1447 case 0:
1448 state->speed = SPEED_10;
1449 break;
1450 case 1:
1451 state->speed = SPEED_100;
1452 break;
1453 case 2:
1454 state->speed = SPEED_1000;
1455 break;
1456 default:
1457 state->speed = SPEED_UNKNOWN;
1458 break;
1459 }
1460
1461 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
1462 if (priv->r->get_mac_rx_pause_sts(port))
1463 state->pause |= MLO_PAUSE_RX;
1464 if (priv->r->get_mac_tx_pause_sts(port))
1465 state->pause |= MLO_PAUSE_TX;
1466 }
1467
1468 static void rtl838x_mac_link_down(struct phylink_config *config,
1469 unsigned int mode,
1470 phy_interface_t interface)
1471 {
1472 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1473 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1474
1475 pr_debug("In %s\n", __func__);
1476 /* Stop TX/RX to port */
1477 sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
1478 }
1479
1480 static void rtl838x_mac_link_up(struct phylink_config *config,
1481 struct phy_device *phy, unsigned int mode,
1482 phy_interface_t interface, int speed, int duplex,
1483 bool tx_pause, bool rx_pause)
1484 {
1485 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1486 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1487
1488 pr_debug("In %s\n", __func__);
1489 /* Restart TX/RX to port */
1490 sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
1491 }
1492
1493 static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
1494 {
1495 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1496 unsigned long flags;
1497
1498 spin_lock_irqsave(&priv->lock, flags);
1499 pr_debug("In %s\n", __func__);
1500 sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
1501 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
1502
1503 if (priv->family_id == RTL8380_FAMILY_ID) {
1504 /* 2 more registers, ALE/MAC block */
1505 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
1506 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1507 (RTL838X_MAC_ALE + 4));
1508
1509 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
1510 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1511 RTL838X_MAC2 + 4);
1512 }
1513 spin_unlock_irqrestore(&priv->lock, flags);
1514 }
1515
1516 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
1517 {
1518 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1519 const struct sockaddr *addr = p;
1520 u8 *mac = (u8 *) (addr->sa_data);
1521
1522 if (!is_valid_ether_addr(addr->sa_data))
1523 return -EADDRNOTAVAIL;
1524
1525 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1526 rtl838x_set_mac_hw(dev, mac);
1527
1528 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1529 return 0;
1530 }
1531
/* MAC initialization for the RTL839x family.
 * Currently a stub; EEE and egress rate limitation still need to be set up.
 */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	// We will need to set-up EEE and the egress-rate limitation
	return 0;
}
1537
1538 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1539 {
1540 int i;
1541
1542 if (priv->family_id == 0x8390)
1543 return rtl8390_init_mac(priv);
1544
1545 pr_info("%s\n", __func__);
1546 /* fix timer for EEE */
1547 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1548 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1549
1550 /* Init VLAN */
1551 if (priv->id == 0x8382) {
1552 for (i = 0; i <= 28; i++)
1553 sw_w32(0, 0xd57c + i * 0x80);
1554 }
1555 if (priv->id == 0x8380) {
1556 for (i = 8; i <= 28; i++)
1557 sw_w32(0, 0xd57c + i * 0x80);
1558 }
1559 return 0;
1560 }
1561
1562 static int rtl838x_get_link_ksettings(struct net_device *ndev,
1563 struct ethtool_link_ksettings *cmd)
1564 {
1565 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1566
1567 pr_debug("%s called\n", __func__);
1568 return phylink_ethtool_ksettings_get(priv->phylink, cmd);
1569 }
1570
1571 static int rtl838x_set_link_ksettings(struct net_device *ndev,
1572 const struct ethtool_link_ksettings *cmd)
1573 {
1574 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1575
1576 pr_debug("%s called\n", __func__);
1577 return phylink_ethtool_ksettings_set(priv->phylink, cmd);
1578 }
1579
1580 static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1581 {
1582 u32 val;
1583 int err;
1584 struct rtl838x_eth_priv *priv = bus->priv;
1585
1586 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1587 return rtl838x_read_sds_phy(mii_id, regnum);
1588 err = rtl838x_read_phy(mii_id, 0, regnum, &val);
1589 if (err)
1590 return err;
1591 return val;
1592 }
1593
1594 static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1595 {
1596 u32 val;
1597 int err;
1598 struct rtl838x_eth_priv *priv = bus->priv;
1599
1600 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1601 return rtl839x_read_sds_phy(mii_id, regnum);
1602
1603 err = rtl839x_read_phy(mii_id, 0, regnum, &val);
1604 if (err)
1605 return err;
1606 return val;
1607 }
1608
1609 static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1610 {
1611 u32 val;
1612 int err;
1613
1614 // TODO: These are hard-coded for the 2 Fibre Ports of the XGS1210
1615 if (mii_id >= 26 && mii_id <= 27)
1616 return rtl930x_read_sds_phy(mii_id - 18, 0, regnum);
1617
1618 if (regnum & MII_ADDR_C45) {
1619 regnum &= ~MII_ADDR_C45;
1620 err = rtl930x_read_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, &val);
1621 } else {
1622 err = rtl930x_read_phy(mii_id, 0, regnum, &val);
1623 }
1624 if (err)
1625 return err;
1626 return val;
1627 }
1628
1629 static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1630 {
1631 u32 val;
1632 int err;
1633 // struct rtl838x_eth_priv *priv = bus->priv;
1634
1635 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1636 // return rtl839x_read_sds_phy(mii_id, regnum);
1637
1638 err = rtl931x_read_phy(mii_id, 0, regnum, &val);
1639 if (err)
1640 return err;
1641 return val;
1642 }
1643
1644 static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
1645 int regnum, u16 value)
1646 {
1647 u32 offset = 0;
1648 struct rtl838x_eth_priv *priv = bus->priv;
1649
1650 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
1651 if (mii_id == 26)
1652 offset = 0x100;
1653 sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
1654 return 0;
1655 }
1656 return rtl838x_write_phy(mii_id, 0, regnum, value);
1657 }
1658
1659 static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
1660 int regnum, u16 value)
1661 {
1662 struct rtl838x_eth_priv *priv = bus->priv;
1663
1664 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1665 return rtl839x_write_sds_phy(mii_id, regnum, value);
1666
1667 return rtl839x_write_phy(mii_id, 0, regnum, value);
1668 }
1669
1670 static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
1671 int regnum, u16 value)
1672 {
1673 // struct rtl838x_eth_priv *priv = bus->priv;
1674
1675 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1676 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1677 if (regnum & MII_ADDR_C45) {
1678 regnum &= ~MII_ADDR_C45;
1679 return rtl930x_write_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, value);
1680 }
1681
1682 return rtl930x_write_phy(mii_id, 0, regnum, value);
1683 }
1684
1685 static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
1686 int regnum, u16 value)
1687 {
1688 // struct rtl838x_eth_priv *priv = bus->priv;
1689
1690 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1691 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1692
1693 return rtl931x_write_phy(mii_id, 0, regnum, value);
1694 }
1695
1696 static int rtl838x_mdio_reset(struct mii_bus *bus)
1697 {
1698 pr_debug("%s called\n", __func__);
1699 /* Disable MAC polling the PHY so that we can start configuration */
1700 sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);
1701
1702 /* Enable PHY control via SoC */
1703 sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);
1704
1705 // Probably should reset all PHYs here...
1706 return 0;
1707 }
1708
/* mii_bus reset callback for RTL839x.
 * Intentionally a no-op: disabling PHY polling as on the other families
 * does not work on this chip. The intended (non-functional) sequence is
 * preserved below as a comment instead of unreachable code.
 */
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
	pr_debug("%s called\n", __func__);

	/* BUG: The following should work, but writing these registers has
	 * no effect on RTL839x:
	 *
	 *   // Disable MAC polling the PHY so that we can start configuration
	 *   sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	 *   sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	 *   // Disable PHY polling via SoC
	 *   sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);
	 *
	 * Probably should reset all PHYs here...
	 */
	return 0;
}
1724
1725 static int rtl931x_mdio_reset(struct mii_bus *bus)
1726 {
1727 sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL);
1728 sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL + 4);
1729
1730 pr_debug("%s called\n", __func__);
1731
1732 return 0;
1733 }
1734
/* mii_bus reset callback for RTL930x: program the SMI topology taken from
 * the device tree — which PHY address and which SMI bus serves each port,
 * which busses use clause 45, and the MAC type of ports 24-27.
 */
static int rtl930x_mdio_reset(struct mii_bus *bus)
{
	int i;
	int pos;
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[2];
	u32 poll_ctrl = 0;

	// Mapping of port to phy-addresses on an SMI bus
	for (i = 0; i < 28; i++) {
		/* 5 bits of PHY address per port, 6 ports per register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
			    RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);

		/* 2 bits of SMI bus number per port, 16 ports per word */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
	}

	// Configure which SMI bus is behind which port number
	sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
	sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);

	// Enable polling on the respective SMI busses
	sw_w32_mask(0, poll_ctrl, RTL930X_SMI_GLB_CTRL);

	// Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus
	for (i = 0; i < 4; i++)
		if (priv->smi_bus_isc45[i])
			c45_mask |= BIT(i + 16);

	pr_info("c45_mask: %08x\n", c45_mask);
	sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);

	// Ports 24 to 27 are 2.5 or 10Gig, set this type (1) or (0) for internal SerDes
	for (i = 24; i < 28; i++) {
		pos = (i - 24) * 3 + 12;
		if (priv->phy_is_internal[i])
			sw_w32_mask(0x7 << pos, 0 << pos, RTL930X_SMI_MAC_TYPE_CTRL);
		else
			sw_w32_mask(0x7 << pos, 1 << pos, RTL930X_SMI_MAC_TYPE_CTRL);
	}

	// TODO: Set up RTL9300_SMI_10GPHY_POLLING_SEL_0 for Aquantia PHYs on e.g. XGS 1250

	return 0;
}
1784
1785 static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
1786 {
1787 struct device_node *mii_np, *dn;
1788 u32 pn;
1789 int ret;
1790
1791 pr_debug("%s called\n", __func__);
1792 mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
1793
1794 if (!mii_np) {
1795 dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
1796 return -ENODEV;
1797 }
1798
1799 if (!of_device_is_available(mii_np)) {
1800 ret = -ENODEV;
1801 goto err_put_node;
1802 }
1803
1804 priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
1805 if (!priv->mii_bus) {
1806 ret = -ENOMEM;
1807 goto err_put_node;
1808 }
1809
1810 switch(priv->family_id) {
1811 case RTL8380_FAMILY_ID:
1812 priv->mii_bus->name = "rtl838x-eth-mdio";
1813 priv->mii_bus->read = rtl838x_mdio_read;
1814 priv->mii_bus->write = rtl838x_mdio_write;
1815 priv->mii_bus->reset = rtl838x_mdio_reset;
1816 break;
1817 case RTL8390_FAMILY_ID:
1818 priv->mii_bus->name = "rtl839x-eth-mdio";
1819 priv->mii_bus->read = rtl839x_mdio_read;
1820 priv->mii_bus->write = rtl839x_mdio_write;
1821 priv->mii_bus->reset = rtl839x_mdio_reset;
1822 break;
1823 case RTL9300_FAMILY_ID:
1824 priv->mii_bus->name = "rtl930x-eth-mdio";
1825 priv->mii_bus->read = rtl930x_mdio_read;
1826 priv->mii_bus->write = rtl930x_mdio_write;
1827 priv->mii_bus->reset = rtl930x_mdio_reset;
1828 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1829 break;
1830 case RTL9310_FAMILY_ID:
1831 priv->mii_bus->name = "rtl931x-eth-mdio";
1832 priv->mii_bus->read = rtl931x_mdio_read;
1833 priv->mii_bus->write = rtl931x_mdio_write;
1834 priv->mii_bus->reset = rtl931x_mdio_reset;
1835 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1836 break;
1837 }
1838 priv->mii_bus->priv = priv;
1839 priv->mii_bus->parent = &priv->pdev->dev;
1840
1841 for_each_node_by_name(dn, "ethernet-phy") {
1842 u32 smi_addr[2];
1843
1844 if (of_property_read_u32(dn, "reg", &pn))
1845 continue;
1846
1847 if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
1848 smi_addr[0] = 0;
1849 smi_addr[1] = pn;
1850 }
1851
1852 if (pn < MAX_PORTS) {
1853 priv->smi_bus[pn] = smi_addr[0];
1854 priv->smi_addr[pn] = smi_addr[1];
1855 } else {
1856 pr_err("%s: illegal port number %d\n", __func__, pn);
1857 }
1858
1859 if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
1860 priv->smi_bus_isc45[smi_addr[0]] = true;
1861
1862 if (of_property_read_bool(dn, "phy-is-integrated")) {
1863 priv->phy_is_internal[pn] = true;
1864 }
1865
1866 }
1867
1868 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
1869 ret = of_mdiobus_register(priv->mii_bus, mii_np);
1870
1871 err_put_node:
1872 of_node_put(mii_np);
1873 return ret;
1874 }
1875
1876 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
1877 {
1878 pr_debug("%s called\n", __func__);
1879 if (!priv->mii_bus)
1880 return 0;
1881
1882 mdiobus_unregister(priv->mii_bus);
1883 mdiobus_free(priv->mii_bus);
1884
1885 return 0;
1886 }
1887
/* ndo_fix_features: no adjustments needed, accept the requested features */
static netdev_features_t rtl838x_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
1893
1894 static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
1895 {
1896 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1897
1898 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
1899 if (!(features & NETIF_F_RXCSUM))
1900 sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
1901 else
1902 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
1903 }
1904
1905 return 0;
1906 }
1907
1908 static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
1909 {
1910 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1911
1912 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
1913 if (!(features & NETIF_F_RXCSUM))
1914 sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
1915 else
1916 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
1917 }
1918
1919 return 0;
1920 }
1921
/* netdev callbacks for the RTL838x family CPU port */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
1935
/* netdev callbacks for the RTL839x family CPU port; differs from RTL838x
 * only in the multicast-list handler
 */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
1949
/* netdev callbacks for the RTL930x family CPU port: priority-based TX
 * queue selection and the 93xx feature handler
 */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
1963
/* netdev callbacks for the RTL931x family CPU port.
 * NOTE(review): unlike the other families, no .ndo_setup_tc is set here —
 * confirm whether TC offload is intentionally unsupported on RTL931x.
 */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
1976
/* phylink MAC operations, passed to phylink_create() in the probe
 * function. One table serves all supported SoC families.
 */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_pcs_get_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
1985
/* ethtool hooks: only link-settings get/set are implemented. */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
1990
1991 static int __init rtl838x_eth_probe(struct platform_device *pdev)
1992 {
1993 struct net_device *dev;
1994 struct device_node *dn = pdev->dev.of_node;
1995 struct rtl838x_eth_priv *priv;
1996 struct resource *res, *mem;
1997 phy_interface_t phy_mode;
1998 struct phylink *phylink;
1999 int err = 0, i, rxrings, rxringlen;
2000 struct ring_b *ring;
2001
2002 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
2003 (u32)pdev, (u32)(&(pdev->dev)));
2004
2005 if (!dn) {
2006 dev_err(&pdev->dev, "No DT found\n");
2007 return -EINVAL;
2008 }
2009
2010 rxrings = (soc_info.family == RTL8380_FAMILY_ID
2011 || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
2012 rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
2013 rxringlen = MAX_ENTRIES / rxrings;
2014 rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
2015
2016 dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
2017 if (!dev) {
2018 err = -ENOMEM;
2019 goto err_free;
2020 }
2021 SET_NETDEV_DEV(dev, &pdev->dev);
2022 priv = netdev_priv(dev);
2023
2024 /* obtain buffer memory space */
2025 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2026 if (res) {
2027 mem = devm_request_mem_region(&pdev->dev, res->start,
2028 resource_size(res), res->name);
2029 if (!mem) {
2030 dev_err(&pdev->dev, "cannot request memory space\n");
2031 err = -ENXIO;
2032 goto err_free;
2033 }
2034
2035 dev->mem_start = mem->start;
2036 dev->mem_end = mem->end;
2037 } else {
2038 dev_err(&pdev->dev, "cannot request IO resource\n");
2039 err = -ENXIO;
2040 goto err_free;
2041 }
2042
2043 /* Allocate buffer memory */
2044 priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER
2045 + sizeof(struct ring_b) + sizeof(struct notify_b),
2046 (void *)&dev->mem_start, GFP_KERNEL);
2047 if (!priv->membase) {
2048 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
2049 err = -ENOMEM;
2050 goto err_free;
2051 }
2052
2053 // Allocate ring-buffer space at the end of the allocated memory
2054 ring = priv->membase;
2055 ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
2056
2057 spin_lock_init(&priv->lock);
2058
2059 /* obtain device IRQ number */
2060 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2061 if (!res) {
2062 dev_err(&pdev->dev, "cannot obtain IRQ, using default 24\n");
2063 dev->irq = 24;
2064 } else {
2065 dev->irq = res->start;
2066 }
2067 dev->ethtool_ops = &rtl838x_ethtool_ops;
2068 dev->min_mtu = ETH_ZLEN;
2069 dev->max_mtu = 1536;
2070 dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
2071 dev->hw_features = NETIF_F_RXCSUM;
2072
2073 priv->id = soc_info.id;
2074 priv->family_id = soc_info.family;
2075 if (priv->id) {
2076 pr_info("Found SoC ID: %4x: %s, family %x\n",
2077 priv->id, soc_info.name, priv->family_id);
2078 } else {
2079 pr_err("Unknown chip id (%04x)\n", priv->id);
2080 return -ENODEV;
2081 }
2082
2083 switch (priv->family_id) {
2084 case RTL8380_FAMILY_ID:
2085 priv->cpu_port = RTL838X_CPU_PORT;
2086 priv->r = &rtl838x_reg;
2087 dev->netdev_ops = &rtl838x_eth_netdev_ops;
2088 break;
2089 case RTL8390_FAMILY_ID:
2090 priv->cpu_port = RTL839X_CPU_PORT;
2091 priv->r = &rtl839x_reg;
2092 dev->netdev_ops = &rtl839x_eth_netdev_ops;
2093 break;
2094 case RTL9300_FAMILY_ID:
2095 priv->cpu_port = RTL930X_CPU_PORT;
2096 priv->r = &rtl930x_reg;
2097 dev->netdev_ops = &rtl930x_eth_netdev_ops;
2098 break;
2099 case RTL9310_FAMILY_ID:
2100 priv->cpu_port = RTL931X_CPU_PORT;
2101 priv->r = &rtl931x_reg;
2102 dev->netdev_ops = &rtl931x_eth_netdev_ops;
2103 break;
2104 default:
2105 pr_err("Unknown SoC family\n");
2106 return -ENODEV;
2107 }
2108 priv->rxringlen = rxringlen;
2109 priv->rxrings = rxrings;
2110
2111 rtl8380_init_mac(priv);
2112
2113 /* try to get mac address in the following order:
2114 * 1) from device tree data
2115 * 2) from internal registers set by bootloader
2116 */
2117 of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
2118 if (is_valid_ether_addr(dev->dev_addr)) {
2119 rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
2120 } else {
2121 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
2122 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
2123 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
2124 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
2125 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
2126 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
2127 }
2128 /* if the address is invalid, use a random value */
2129 if (!is_valid_ether_addr(dev->dev_addr)) {
2130 struct sockaddr sa = { AF_UNSPEC };
2131
2132 netdev_warn(dev, "Invalid MAC address, using random\n");
2133 eth_hw_addr_random(dev);
2134 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
2135 if (rtl838x_set_mac_address(dev, &sa))
2136 netdev_warn(dev, "Failed to set MAC address.\n");
2137 }
2138 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
2139 sw_r32(priv->r->mac + 4));
2140 strcpy(dev->name, "eth%d");
2141 priv->pdev = pdev;
2142 priv->netdev = dev;
2143
2144 err = rtl838x_mdio_init(priv);
2145 if (err)
2146 goto err_free;
2147
2148 err = register_netdev(dev);
2149 if (err)
2150 goto err_free;
2151
2152 for (i = 0; i < priv->rxrings; i++) {
2153 priv->rx_qs[i].id = i;
2154 priv->rx_qs[i].priv = priv;
2155 netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
2156 }
2157
2158 platform_set_drvdata(pdev, dev);
2159
2160 phy_mode = PHY_INTERFACE_MODE_NA;
2161 err = of_get_phy_mode(dn, &phy_mode);
2162 if (err < 0) {
2163 dev_err(&pdev->dev, "incorrect phy-mode\n");
2164 err = -EINVAL;
2165 goto err_free;
2166 }
2167 priv->phylink_config.dev = &dev->dev;
2168 priv->phylink_config.type = PHYLINK_NETDEV;
2169
2170 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
2171 phy_mode, &rtl838x_phylink_ops);
2172 if (IS_ERR(phylink)) {
2173 err = PTR_ERR(phylink);
2174 goto err_free;
2175 }
2176 priv->phylink = phylink;
2177
2178 return 0;
2179
2180 err_free:
2181 pr_err("Error setting up netdev, freeing it again.\n");
2182 free_netdev(dev);
2183 return err;
2184 }
2185
2186 static int rtl838x_eth_remove(struct platform_device *pdev)
2187 {
2188 struct net_device *dev = platform_get_drvdata(pdev);
2189 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2190 int i;
2191
2192 if (dev) {
2193 pr_info("Removing platform driver for rtl838x-eth\n");
2194 rtl838x_mdio_remove(priv);
2195 rtl838x_hw_stop(priv);
2196
2197 netif_tx_stop_all_queues(dev);
2198
2199 for (i = 0; i < priv->rxrings; i++)
2200 netif_napi_del(&priv->rx_qs[i].napi);
2201
2202 unregister_netdev(dev);
2203 free_netdev(dev);
2204 }
2205 return 0;
2206 }
2207
/* Devicetree match table: the single "realtek,rtl838x-eth" compatible
 * covers all supported families; the actual family (838x/839x/930x/931x)
 * is distinguished at probe time via soc_info.
 */
static const struct of_device_id rtl838x_eth_of_ids[] = {
	{ .compatible = "realtek,rtl838x-eth"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
2213
2214 static struct platform_driver rtl838x_eth_driver = {
2215 .probe = rtl838x_eth_probe,
2216 .remove = rtl838x_eth_remove,
2217 .driver = {
2218 .name = "rtl838x-eth",
2219 .pm = NULL,
2220 .of_match_table = rtl838x_eth_of_ids,
2221 },
2222 };
2223
/* Registers the driver at module init and unregisters it at exit */
module_platform_driver(rtl838x_eth_driver);

MODULE_AUTHOR("B. Koblitz");
MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
MODULE_LICENSE("GPL");