1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/drivers/net/ethernet/rtl838x_eth.c
4 * Copyright (C) 2020 B. Koblitz
5 */
6
7 #include <linux/dma-mapping.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/platform_device.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/of.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/module.h>
18 #include <linux/phylink.h>
19 #include <net/dsa.h>
20 #include <net/switchdev.h>
21 #include <asm/cacheflush.h>
22
23 #include <asm/mach-rtl838x/mach-rtl838x.h>
24 #include "rtl838x_eth.h"
25
26 /*
 * Maximum number of RX rings is 8, assigned by the switch based on
 * packet/port priority (not implemented)
 * Maximum number of TX rings is 2 (only ring 0 is used)
 * The RX ring length needs to be at least 200, otherwise CPU and switch
 * may deadlock.
32 */
33 #define RXRINGS 8
34 #define RXRINGLEN 300
35 #define TXRINGS 2
36 #define TXRINGLEN 160
37 #define NOTIFY_EVENTS 10
38 #define NOTIFY_BLOCKS 10
39 #define TX_EN 0x8
40 #define RX_EN 0x4
41 #define TX_DO 0x2
42 #define WRAP 0x2
43
44 #define RING_BUFFER 1600
45
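/*
 * Layout notes (derived from the structures and code below): struct p_hdr is
 * the per-packet header/descriptor shared with the switch's DMA engine. buf
 * points at the packet data, size/len carry the buffer and packet lengths,
 * and cpu_tag holds the 5-word CPU tag used to steer frames to/from ports.
 */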
46 struct p_hdr {
47 uint8_t *buf;
48 uint16_t reserved;
49 uint16_t size; /* buffer size */
50 uint16_t offset;
51 uint16_t len; /* pkt len */
52 uint16_t reserved2;
53 uint16_t cpu_tag[5];
54 } __packed __aligned(1);
55
56 struct n_event {
57 uint32_t type:2;
58 uint32_t fidVid:12;
59 uint64_t mac:48;
60 uint32_t slp:6;
61 uint32_t valid:1;
62 uint32_t reserved:27;
63 } __packed __aligned(1);
64
65 struct ring_b {
66 uint32_t rx_r[RXRINGS][RXRINGLEN];
67 uint32_t tx_r[TXRINGS][TXRINGLEN];
68 struct p_hdr rx_header[RXRINGS][RXRINGLEN];
69 struct p_hdr tx_header[TXRINGS][TXRINGLEN];
70 uint32_t c_rx[RXRINGS];
71 uint32_t c_tx[TXRINGS];
72 uint8_t rx_space[RXRINGS*RXRINGLEN*RING_BUFFER];
73 uint8_t tx_space[TXRINGLEN*RING_BUFFER];
74 };
75
76 struct notify_block {
77 struct n_event events[NOTIFY_EVENTS];
78 };
79
80 struct notify_b {
81 struct notify_block blocks[NOTIFY_BLOCKS];
82 u32 reserved1[8];
83 u32 ring[NOTIFY_BLOCKS];
84 u32 reserved2[8];
85 };
86
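/*
 * Build the 5-word CPU tag for a TX frame. A dest_port > 0 forces the frame
 * out of that switch port; otherwise the tag is cleared. The RTL839x variant
 * below additionally carries the upper half of the port mask in
 * cpu_tag[1]/[2], since that family has more than 32 ports.
 */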
static inline void rtl838x_create_tx_header(struct p_hdr *h, int dest_port)
88 {
89 if (dest_port > 0) {
90 h->cpu_tag[0] = 0x0400;
91 h->cpu_tag[1] = 0x0200;
92 h->cpu_tag[2] = 0x0000;
93 h->cpu_tag[3] = (1 << dest_port) >> 16;
94 h->cpu_tag[4] = (1 << dest_port) & 0xffff;
95 } else {
96 h->cpu_tag[0] = 0;
97 h->cpu_tag[1] = 0;
98 h->cpu_tag[2] = 0;
99 h->cpu_tag[3] = 0;
100 h->cpu_tag[4] = 0;
101 }
102 }
103
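/*
 * Worked example (illustrative only): for dest_port = 5 on RTL838x,
 * 1 << 5 = 0x20, so cpu_tag[3] = 0x0000 and cpu_tag[4] = 0x0020, and the
 * frame is forced out of port 5.
 */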
static inline void rtl839x_create_tx_header(struct p_hdr *h, int dest_port)
105 {
106 if (dest_port > 0) {
107 h->cpu_tag[0] = 0x0100;
108 h->cpu_tag[1] = ((1 << (dest_port - 32)) >> 16) | (1 << 21);
109 h->cpu_tag[2] = (1 << (dest_port - 32)) & 0xffff;
110 h->cpu_tag[3] = (1 << dest_port) >> 16;
111 h->cpu_tag[4] = (1 << dest_port) & 0xffff;
112 } else {
113 h->cpu_tag[0] = 0;
114 h->cpu_tag[1] = 0;
115 h->cpu_tag[2] = 0;
116 h->cpu_tag[3] = 0;
117 h->cpu_tag[4] = 0;
118 }
119 }
120
121 extern void rtl838x_fdb_sync(struct work_struct *work);
122
123 struct rtl838x_eth_priv {
124 struct net_device *netdev;
125 struct platform_device *pdev;
126 void *membase;
127 spinlock_t lock;
128 struct mii_bus *mii_bus;
129 struct napi_struct napi;
130 struct phylink *phylink;
131 struct phylink_config phylink_config;
132 u16 id;
133 u16 family_id;
134 const struct rtl838x_reg *r;
135 u8 cpu_port;
136 u8 port_mask;
137 u32 lastEvent;
138 };
139
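/*
 * Per-family register map and accessor tables. The probe function points
 * priv->r at one of these depending on the detected SoC family, so the rest
 * of the driver can stay family-agnostic.
 */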
140 static const struct rtl838x_reg rtl838x_reg = {
141 .mac_port_ctrl = rtl838x_mac_port_ctrl,
142 .dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
143 .dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
144 .dma_if_ctrl = RTL838X_DMA_IF_CTRL,
145 .mac_force_mode_ctrl = rtl838x_mac_force_mode_ctrl,
146 .dma_rx_base = rtl838x_dma_rx_base,
147 .dma_tx_base = rtl838x_dma_tx_base,
148 .dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
149 .dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
150 .dma_if_rx_cur = rtl838x_dma_if_rx_cur,
151 .rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
152 .get_mac_link_sts = rtl838x_get_mac_link_sts,
153 .get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
154 .get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
155 .get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
156 .get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
157 .mac = RTL838X_MAC,
158 .l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
159 };
160
161 static const struct rtl838x_reg rtl839x_reg = {
162 .mac_port_ctrl = rtl839x_mac_port_ctrl,
163 .dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
164 .dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
165 .dma_if_ctrl = RTL839X_DMA_IF_CTRL,
166 .mac_force_mode_ctrl = rtl839x_mac_force_mode_ctrl,
167 .dma_rx_base = rtl839x_dma_rx_base,
168 .dma_tx_base = rtl839x_dma_tx_base,
169 .dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
170 .dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
171 .dma_if_rx_cur = rtl839x_dma_if_rx_cur,
172 .rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
173 .get_mac_link_sts = rtl839x_get_mac_link_sts,
174 .get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
175 .get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
176 .get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
177 .get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
178 .mac = RTL839X_MAC,
179 .l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
180 };
181
182 extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
183 extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
184 extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
185 extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
186
187 /*
 * Discard the contents of the RX rings and hand the descriptors back to
 * the switch; called from the interrupt handler on RX buffer overrun.
 * The caller must hold priv->lock.
191 */
192 static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv)
193 {
194 int r;
195 u32 *last;
196 struct p_hdr *h;
197 struct ring_b *ring = priv->membase;
198
199 for (r = 0; r < RXRINGS; r++) {
200 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur(r)));
201 do {
202 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
203 break;
204 h = &ring->rx_header[r][ring->c_rx[r]];
h->buf = (u8 *)KSEG1ADDR(ring->rx_space
+ (r * RXRINGLEN + ring->c_rx[r]) * RING_BUFFER);
207 h->size = RING_BUFFER;
208 h->len = 0;
209 /* make sure the header is visible to the ASIC */
210 mb();
211
212 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
213 | (ring->c_rx[r] == (RXRINGLEN - 1) ? WRAP : 0x1);
214 ring->c_rx[r] = (ring->c_rx[r] + 1) % RXRINGLEN;
215 } while (&ring->rx_r[r][ring->c_rx[r]] != last);
216 }
217 }
218
219 struct fdb_update_work {
220 struct work_struct work;
221 struct net_device *ndev;
222 u64 macs[NOTIFY_EVENTS + 1];
223 };
224
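/*
 * Walk the L2 notification ring starting at priv->lastEvent, collect the
 * valid MAC events of each block into an fdb_update_work item (the event
 * type is encoded in bit 63 of the stored MAC) and schedule rtl838x_fdb_sync
 * to process them. Each ring entry is handed back to the switch by setting
 * its ownership bit.
 */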
225 static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
226 {
227 struct notify_b *nb = priv->membase + sizeof(struct ring_b);
228 u32 e = priv->lastEvent;
229 struct n_event *event;
230 int i;
231 u64 mac;
232 struct fdb_update_work *w;
233
234 while (!(nb->ring[e] & 1)) {
235 w = kzalloc(sizeof(*w), GFP_ATOMIC);
236 if (!w) {
pr_err("Out of memory: %s\n", __func__);
238 return;
239 }
240 INIT_WORK(&w->work, rtl838x_fdb_sync);
241
242 for (i = 0; i < NOTIFY_EVENTS; i++) {
243 event = &nb->blocks[e].events[i];
244 if (!event->valid)
245 continue;
246 mac = event->mac;
247 if (event->type)
248 mac |= 1ULL << 63;
249 w->ndev = priv->netdev;
250 w->macs[i] = mac;
251 }
252
253 /* Hand the ring entry back to the switch */
254 nb->ring[e] = nb->ring[e] | 1;
255 e = (e + 1) % NOTIFY_BLOCKS;
256
257 w->macs[i] = 0ULL;
258 schedule_work(&w->work);
259 }
260 priv->lastEvent = e;
261 }
262
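/*
 * Interrupt handler. Going by the masks used below, the status register is
 * laid out as: bits 16-19 TX done, bits 8-15 RX done (one bit per ring),
 * bits 0-7 RX ring overrun, and bit 20 L2 notification events on RTL839x
 * (bits 20-22 are acknowledged). TX interrupts are only acknowledged; RX
 * work is deferred to NAPI.
 */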
263 static irqreturn_t rtl838x_net_irq(int irq, void *dev_id)
264 {
265 struct net_device *dev = dev_id;
266 struct rtl838x_eth_priv *priv = netdev_priv(dev);
267 u32 status = sw_r32(priv->r->dma_if_intr_sts);
268
269 spin_lock(&priv->lock);
270 /* Ignore TX interrupt */
271 if ((status & 0xf0000)) {
272 /* Clear ISR */
273 sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
274 }
275
276 /* RX interrupt */
277 if (status & 0x0ff00) {
278 /* Disable RX interrupt */
279 sw_w32_mask(0xff00, 0, priv->r->dma_if_intr_msk);
280 sw_w32(0x0000ff00, priv->r->dma_if_intr_sts);
281 napi_schedule(&priv->napi);
282 }
283
284 /* RX buffer overrun */
285 if (status & 0x000ff) {
286 pr_debug("RX buffer overrun: status %x, mask: %x\n",
287 status, sw_r32(priv->r->dma_if_intr_msk));
288 sw_w32(0x000000ff, priv->r->dma_if_intr_sts);
289 rtl838x_rb_cleanup(priv);
290 }
291
292 if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
293 sw_w32(0x00700000, priv->r->dma_if_intr_sts);
294 rtl839x_l2_notification_handler(priv);
295 }
296
297 spin_unlock(&priv->lock);
298 return IRQ_HANDLED;
299 }
300
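/*
 * Reset the NIC and queue logic: stop traffic on the CPU port, save the
 * RTL839x notification and NBUF state, write the reset bit in rst_glb_ctrl
 * and wait for the NIC and queue reset bits to clear, then restore the saved
 * state, re-enable the CPU port and leave all DMA interrupts disabled and
 * cleared.
 */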
301 static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
302 {
303 u32 int_saved, nbuf;
304
305 pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
306 /* Stop TX/RX */
307 sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
308 mdelay(500);
309
310 if (priv->family_id == RTL8390_FAMILY_ID) {
311 /* Preserve L2 notification and NBUF settings */
312 int_saved = sw_r32(priv->r->dma_if_intr_msk);
313 nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
314
315 /* Disable link change interrupt on RTL839x */
316 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
317 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
318
319 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
320 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
321 }
322
323 /* Reset NIC and Queue */
324 sw_w32(0x08, priv->r->rst_glb_ctrl);
325 if (priv->family_id == RTL8390_FAMILY_ID)
326 sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
327 do { /* Reset NIC */
328 udelay(20);
329 } while (sw_r32(priv->r->rst_glb_ctrl) & 0x08);
330 do { /* Reset Queues */
331 udelay(20);
332 } while (sw_r32(priv->r->rst_glb_ctrl) & 0x04);
333 mdelay(100);
334
335 /* Re-enable link change interrupt */
336 if (priv->family_id == RTL8390_FAMILY_ID) {
337 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
338 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
339 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
340 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
341
342 /* Restore notification settings: on RTL838x these bits are null */
343 sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
344 sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
345 }
346
347 /* Restart TX/RX to CPU port */
348 sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
349
350 if (priv->family_id == RTL8380_FAMILY_ID) {
351 /* Set Speed, duplex, flow control
352 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
353 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
354 * | MEDIA_SEL
355 */
356 sw_w32(0x6192F, priv->r->mac_force_mode_ctrl(priv->cpu_port));
357 /* allow CRC errors on CPU-port */
358 sw_w32_mask(0, 0x8, priv->r->mac_port_ctrl(priv->cpu_port));
359 } else {
360 /* CPU port joins Lookup Miss Flooding Portmask */
361 sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
362 sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
363 sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);
364
365 /* Force CPU port link up */
366 sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl(priv->cpu_port));
367 }
368
369 /* Disable and clear interrupts */
370 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
371 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
372 }
373
374 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
375 {
376 int i;
377 struct ring_b *ring = priv->membase;
378
379 for (i = 0; i < RXRINGS; i++)
380 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base(i));
381
382 for (i = 0; i < TXRINGS; i++)
383 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base(i));
384 }
385
386 static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
387 {
388 /* Disable Head of Line features for all RX rings */
389 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
390
391 /* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
392 sw_w32(0x06400020, priv->r->dma_if_ctrl);
393
394 /* Enable RX done, RX overflow and TX done interrupts */
395 sw_w32(0xfffff, priv->r->dma_if_intr_msk);
396
397 /* Enable traffic, engine expects empty FCS field */
398 sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);
399 }
400
401 static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
402 {
403 /* Setup CPU-Port: RX Buffer */
404 sw_w32(0x0000c808, priv->r->dma_if_ctrl);
405
406 /* Enable Notify, RX done, RX overflow and TX done interrupts */
407 sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ!
408
409 /* Enable traffic */
410 sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);
411 }
412
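/*
 * Initialize the RX and TX descriptor rings: RX descriptors are handed to
 * the switch right away (ownership bit set), TX descriptors stay with the
 * CPU, and the last descriptor of each ring gets the WRAP bit.
 */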
413 static void rtl838x_setup_ring_buffer(struct ring_b *ring)
414 {
415 int i, j;
416
417 struct p_hdr *h;
418
419 for (i = 0; i < RXRINGS; i++) {
420 for (j = 0; j < RXRINGLEN; j++) {
421 h = &ring->rx_header[i][j];
h->buf = (u8 *)KSEG1ADDR(ring->rx_space + (i * RXRINGLEN + j) * RING_BUFFER);
423 h->reserved = 0;
424 h->size = RING_BUFFER;
425 h->offset = 0;
426 h->len = 0;
427 memset(&h->cpu_tag, 0, sizeof(uint16_t[5]));
428 /* All rings owned by switch, last one wraps */
429 ring->rx_r[i][j] = KSEG1ADDR(h) | 1 | (j == (RXRINGLEN - 1) ? WRAP : 0);
430 }
431 ring->c_rx[i] = 0;
432 }
433
434 for (i = 0; i < TXRINGS; i++) {
435 for (j = 0; j < TXRINGLEN; j++) {
436 h = &ring->tx_header[i][j];
437 h->buf = (u8 *)KSEG1ADDR(ring->tx_space + i * j * RING_BUFFER);
438 h->reserved = 0;
439 h->size = RING_BUFFER;
440 h->offset = 0;
441 h->len = 0;
442 memset(&h->cpu_tag, 0, sizeof(uint16_t[5]));
443 ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
444 }
445 /* Last header is wrapping around */
446 ring->tx_r[i][j-1] |= WRAP;
447 ring->c_tx[i] = 0;
448 }
449 }
450
451 static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
452 {
453 int i;
454 struct notify_b *b = priv->membase + sizeof(struct ring_b);
455
456 for (i = 0; i < NOTIFY_BLOCKS; i++)
457 b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);
458
459 sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
460 sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);
461
462 /* Setup notification events */
463 sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
464 sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN
465
466 /* Enable Notification */
467 sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
468 priv->lastEvent = 0;
469 }
470
471 static int rtl838x_eth_open(struct net_device *ndev)
472 {
473 unsigned long flags;
474 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
475 struct ring_b *ring = priv->membase;
476 int err;
477
478 pr_info("%s called: RX rings %d, TX rings %d\n", __func__, RXRINGS, TXRINGS);
479
480 spin_lock_irqsave(&priv->lock, flags);
481 rtl838x_hw_reset(priv);
482 rtl838x_setup_ring_buffer(ring);
483 if (priv->family_id == RTL8390_FAMILY_ID) {
484 rtl839x_setup_notify_ring_buffer(priv);
485 /* Make sure the ring structure is visible to the ASIC */
486 mb();
487 flush_cache_all();
488 }
489
490 rtl838x_hw_ring_setup(priv);
491 err = request_irq(ndev->irq, rtl838x_net_irq, IRQF_SHARED,
492 ndev->name, ndev);
493 if (err) {
494 netdev_err(ndev, "%s: could not acquire interrupt: %d\n",
495 __func__, err);
496 return err;
497 }
498 phylink_start(priv->phylink);
499
500 napi_enable(&priv->napi);
501 netif_start_queue(ndev);
502
503 if (priv->family_id == RTL8380_FAMILY_ID) {
504 rtl838x_hw_en_rxtx(priv);
505 /* Trap IGMP traffic to CPU-Port */
506 sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
507 /* Flush learned FDB entries on link down of a port */
508 sw_w32_mask(0, 1 << 7, RTL838X_L2_CTRL_0);
509 } else {
510 rtl839x_hw_en_rxtx(priv);
511 sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
512 sw_w32_mask(0, 1 << 7, RTL839X_L2_CTRL_0);
513 }
514
515 spin_unlock_irqrestore(&priv->lock, flags);
516
517 return 0;
518 }
519
520 static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
521 {
522 u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192D : 0x75;
523 u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
524 int i;
525
526 /* Block all ports */
527 if (priv->family_id == RTL8380_FAMILY_ID) {
528 sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
529 sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
530 sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
531 }
532
533 /* Flush L2 address cache */
534 if (priv->family_id == RTL8380_FAMILY_ID) {
535 for (i = 0; i <= priv->cpu_port; i++) {
536 sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
537 do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
538 }
539 } else {
540 for (i = 0; i <= priv->cpu_port; i++) {
541 sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
542 do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
543 }
544 }
545
546 /* CPU-Port: Link down */
547 sw_w32(force_mac, priv->r->mac_force_mode_ctrl(priv->cpu_port));
548 mdelay(100);
549
550 /* Disable traffic */
551 sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
mdelay(200); /* TODO: check whether this delay is needed */
553
554 /* Disable all TX/RX interrupts */
555 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
556 sw_w32(clear_irq, priv->r->dma_if_intr_sts);
557
558 /* Disable TX/RX DMA */
559 sw_w32(0x00000000, priv->r->dma_if_ctrl);
560 mdelay(200);
561 }
562
563 static int rtl838x_eth_stop(struct net_device *ndev)
564 {
565 unsigned long flags;
566 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
567
568 pr_info("in %s\n", __func__);
569
570 spin_lock_irqsave(&priv->lock, flags);
571 phylink_stop(priv->phylink);
572 rtl838x_hw_stop(priv);
573 free_irq(ndev->irq, ndev);
574 napi_disable(&priv->napi);
575 netif_stop_queue(ndev);
576 spin_unlock_irqrestore(&priv->lock, flags);
577
578 return 0;
579 }
580
581 static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
582 {
583 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
584 sw_w32(0x0, RTL839X_RMA_CTRL_0);
585 sw_w32(0x0, RTL839X_RMA_CTRL_1);
586 sw_w32(0x0, RTL839X_RMA_CTRL_2);
587 sw_w32(0x0, RTL839X_RMA_CTRL_3);
588 }
589 if (ndev->flags & IFF_ALLMULTI) {
590 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
591 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
592 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
593 }
594 if (ndev->flags & IFF_PROMISC) {
595 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
596 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
597 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
598 sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
599 }
600 }
601
602 static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
603 {
604 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
605
606 if (priv->family_id == RTL8390_FAMILY_ID)
607 return rtl839x_eth_set_multicast_list(ndev);
608
609 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
610 sw_w32(0x0, RTL838X_RMA_CTRL_0);
611 sw_w32(0x0, RTL838X_RMA_CTRL_1);
612 }
613 if (ndev->flags & IFF_ALLMULTI)
614 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
615 if (ndev->flags & IFF_PROMISC) {
616 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
617 sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
618 }
619 }
620
621 static void rtl838x_eth_tx_timeout(struct net_device *ndev)
622 {
623 unsigned long flags;
624 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
625
626 pr_info("in %s\n", __func__);
627 spin_lock_irqsave(&priv->lock, flags);
628 rtl838x_hw_stop(priv);
629 rtl838x_hw_ring_setup(priv);
630 rtl838x_hw_en_rxtx(priv);
631 netif_trans_update(ndev);
632 netif_start_queue(ndev);
633 spin_unlock_irqrestore(&priv->lock, flags);
634 }
635
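/*
 * Transmit path. Frames coming from the DSA layer carry a 4-byte trailer tag
 * (0x80, port, 0x10, 0x00); it is stripped here and translated into the CPU
 * tag of the TX header instead. The frame is extended by 4 bytes because the
 * ASIC expects room for the FCS, and only TX ring 0 is used.
 */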
636 static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
637 {
638 int len, i;
639 struct rtl838x_eth_priv *priv = netdev_priv(dev);
640 struct ring_b *ring = priv->membase;
641 uint32_t val;
642 int ret;
643 unsigned long flags;
644 struct p_hdr *h;
645 int dest_port = -1;
646
647 spin_lock_irqsave(&priv->lock, flags);
648 len = skb->len;
649
650 /* Check for DSA tagging at the end of the buffer */
651 if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80 && skb->data[len-3] > 0
652 && skb->data[len-3] < 28 && skb->data[len-2] == 0x10
653 && skb->data[len-1] == 0x00) {
654 /* Reuse tag space for CRC */
655 dest_port = skb->data[len-3];
656 len -= 4;
657 }
658 if (len < ETH_ZLEN)
659 len = ETH_ZLEN;
660
661 /* ASIC expects that packet includes CRC, so we extend by 4 bytes */
662 len += 4;
663
664 if (skb_padto(skb, len)) {
665 ret = NETDEV_TX_OK;
666 goto txdone;
667 }
668
669 /* We can send this packet if CPU owns the descriptor */
670 if (!(ring->tx_r[0][ring->c_tx[0]] & 0x1)) {
671 /* Set descriptor for tx */
672 h = &ring->tx_header[0][ring->c_tx[0]];
673
674 h->buf = (u8 *)KSEG1ADDR(ring->tx_space);
675 h->size = len;
676 h->len = len;
677
678 /* Create cpu_tag */
679 if (priv->family_id == RTL8380_FAMILY_ID)
680 rtl838x_create_tx_header(h, dest_port);
681 else
682 rtl839x_create_tx_header(h, dest_port);
683
684 /* Copy packet data to tx buffer */
685 memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
686 /* Make sure packet data is visible to ASIC */
687 mb(); /* wmb() probably works, too */
688
689 /* Hand over to switch */
690 ring->tx_r[0][ring->c_tx[0]] = ring->tx_r[0][ring->c_tx[0]] | 0x1;
691
/* BUG workaround: before the TX fetch is triggered, make sure the engine
 * accesses the right descriptor data.
 * This might not be necessary on newer RTL839x, though.
694 */
695 for (i = 0; i < 10; i++) {
696 val = sw_r32(priv->r->dma_if_ctrl);
697 if ((val & 0xc) == 0xc)
698 break;
699 }
700
701 /* Tell switch to send data */
702 sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
703
704 dev->stats.tx_packets++;
705 dev->stats.tx_bytes += len;
706 dev_kfree_skb(skb);
707 ring->c_tx[0] = (ring->c_tx[0] + 1) % TXRINGLEN;
708 ret = NETDEV_TX_OK;
709 } else {
710 dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
711 ret = NETDEV_TX_BUSY;
712 }
713 txdone:
714 spin_unlock_irqrestore(&priv->lock, flags);
715 return ret;
716 }
717
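/*
 * Receive up to budget packets from RX ring r: copy each packet into a newly
 * allocated skb, re-create the 4-byte trailer tag in place of the CRC when
 * DSA is in use, pass the skb up the stack and hand the descriptor back to
 * the switch. Returns the number of packets processed.
 */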
718 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
719 {
720 struct rtl838x_eth_priv *priv = netdev_priv(dev);
721 struct ring_b *ring = priv->membase;
722 struct sk_buff *skb;
723 unsigned long flags;
724 int i, len, work_done = 0;
725 u8 *data, *skb_data;
726 unsigned int val;
727 u32 *last;
728 struct p_hdr *h;
729 bool dsa = netdev_uses_dsa(dev);
730
731 spin_lock_irqsave(&priv->lock, flags);
732 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur(r)));
733
734 if (&ring->rx_r[r][ring->c_rx[r]] == last) {
735 spin_unlock_irqrestore(&priv->lock, flags);
736 return 0;
737 }
738 do {
739 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
740 netdev_warn(dev, "WARNING Ring contention: ring %x, last %x, current %x, cPTR %x, ISR %x\n", r, (uint32_t)last,
741 (u32) &ring->rx_r[r][ring->c_rx[r]],
742 ring->rx_r[r][ring->c_rx[r]],
743 sw_r32(priv->r->dma_if_intr_sts));
744 break;
745 }
746
747 h = &ring->rx_header[r][ring->c_rx[r]];
748 data = (u8 *)KSEG1ADDR(h->buf);
749 len = h->len;
750
751 if (!len)
752 break;
753 work_done++;
754
755 len -= 4; /* strip the CRC */
756 /* Add 4 bytes for cpu_tag */
757 if (dsa)
758 len += 4;
759
/* Must not sleep here: we run in NAPI poll context under priv->lock */
skb = alloc_skb(len + 4, GFP_ATOMIC);

if (likely(skb)) {
skb_reserve(skb, NET_IP_ALIGN);
/* Work around a hardware issue on RTL838x SoCs */
765 if (priv->family_id == RTL8380_FAMILY_ID) {
766 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
767 for (i = 0; i < RXRINGS; i++) {
768 /* Update each ring cnt */
769 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
770 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
771 }
772 }
773
774 skb_data = skb_put(skb, len);
775 /* Make sure data is visible */
776 mb();
777 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
778 /* Overwrite CRC with cpu_tag */
779 if (dsa) {
780 skb->data[len-4] = 0x80;
781 skb->data[len-3] = h->cpu_tag[0] & priv->port_mask;
782 skb->data[len-2] = 0x10;
783 skb->data[len-1] = 0x00;
784 }
785
786 skb->protocol = eth_type_trans(skb, dev);
787 dev->stats.rx_packets++;
788 dev->stats.rx_bytes += len;
789
790 netif_receive_skb(skb);
791 } else {
792 if (net_ratelimit())
793 dev_warn(&dev->dev, "low on memory - packet dropped\n");
794 dev->stats.rx_dropped++;
795 }
796
h->buf = (u8 *)KSEG1ADDR(ring->rx_space
+ (r * RXRINGLEN + ring->c_rx[r]) * RING_BUFFER);
799 h->size = RING_BUFFER;
800 h->len = 0;
801 memset(&h->cpu_tag, 0, sizeof(uint16_t[5]));
802
803 ring->rx_r[r][ring->c_rx[r]]
804 = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (RXRINGLEN-1) ? WRAP : 0x1);
805 ring->c_rx[r] = (ring->c_rx[r] + 1) % RXRINGLEN;
806 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
807
808 spin_unlock_irqrestore(&priv->lock, flags);
809 return work_done;
810 }
811
812 static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
813 {
814 struct rtl838x_eth_priv *priv = container_of(napi, struct rtl838x_eth_priv, napi);
815 int work_done = 0, r = 0;
816
817 while (work_done < budget && r < RXRINGS) {
818 work_done += rtl838x_hw_receive(priv->netdev, r, budget - work_done);
819 r++;
820 }
821
822 if (work_done < budget) {
823 napi_complete_done(napi, work_done);
824 /* Enable RX interrupt */
825 sw_w32_mask(0, 0xfffff, priv->r->dma_if_intr_msk);
826 }
827 return work_done;
828 }
829
830
831 static void rtl838x_validate(struct phylink_config *config,
832 unsigned long *supported,
833 struct phylink_link_state *state)
834 {
835 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
836
837 pr_info("In %s\n", __func__);
838
839 if (!phy_interface_mode_is_rgmii(state->interface) &&
840 state->interface != PHY_INTERFACE_MODE_1000BASEX &&
841 state->interface != PHY_INTERFACE_MODE_MII &&
842 state->interface != PHY_INTERFACE_MODE_REVMII &&
843 state->interface != PHY_INTERFACE_MODE_GMII &&
844 state->interface != PHY_INTERFACE_MODE_QSGMII &&
845 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
846 state->interface != PHY_INTERFACE_MODE_SGMII) {
847 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
848 pr_err("Unsupported interface: %d\n", state->interface);
849 return;
850 }
851
852 /* Allow all the expected bits */
853 phylink_set(mask, Autoneg);
854 phylink_set_port_modes(mask);
855 phylink_set(mask, Pause);
856 phylink_set(mask, Asym_Pause);
857
858 /* With the exclusion of MII and Reverse MII, we support Gigabit,
859 * including Half duplex
860 */
861 if (state->interface != PHY_INTERFACE_MODE_MII &&
862 state->interface != PHY_INTERFACE_MODE_REVMII) {
863 phylink_set(mask, 1000baseT_Full);
864 phylink_set(mask, 1000baseT_Half);
865 }
866
867 phylink_set(mask, 10baseT_Half);
868 phylink_set(mask, 10baseT_Full);
869 phylink_set(mask, 100baseT_Half);
870 phylink_set(mask, 100baseT_Full);
871
872 bitmap_and(supported, supported, mask,
873 __ETHTOOL_LINK_MODE_MASK_NBITS);
874 bitmap_and(state->advertising, state->advertising, mask,
875 __ETHTOOL_LINK_MODE_MASK_NBITS);
876 }
877
878
879 static void rtl838x_mac_config(struct phylink_config *config,
880 unsigned int mode,
881 const struct phylink_link_state *state)
882 {
883 /* This is only being called for the master device,
884 * i.e. the CPU-Port. We don't need to do anything.
885 */
886
887 pr_info("In %s, mode %x\n", __func__, mode);
888 }
889
890 static void rtl838x_mac_an_restart(struct phylink_config *config)
891 {
892 struct net_device *dev = container_of(config->dev, struct net_device, dev);
893 struct rtl838x_eth_priv *priv = netdev_priv(dev);
894
895 /* This works only on RTL838x chips */
896 if (priv->family_id != RTL8380_FAMILY_ID)
897 return;
898
899 pr_info("In %s\n", __func__);
900 /* Restart by disabling and re-enabling link */
901 sw_w32(0x6192D, priv->r->mac_force_mode_ctrl(priv->cpu_port));
902 mdelay(20);
903 sw_w32(0x6192F, priv->r->mac_force_mode_ctrl(priv->cpu_port));
904 }
905
906 static int rtl838x_mac_pcs_get_state(struct phylink_config *config,
907 struct phylink_link_state *state)
908 {
909 u32 speed;
910 struct net_device *dev = container_of(config->dev, struct net_device, dev);
911 struct rtl838x_eth_priv *priv = netdev_priv(dev);
912 int port = priv->cpu_port;
913
914 pr_info("In %s\n", __func__);
915
916 state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
917 state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
918
919 speed = priv->r->get_mac_link_spd_sts(port);
920 switch (speed) {
921 case 0:
922 state->speed = SPEED_10;
923 break;
924 case 1:
925 state->speed = SPEED_100;
926 break;
case 2:
state->speed = SPEED_1000;
928 break;
929 default:
930 state->speed = SPEED_UNKNOWN;
931 break;
932 }
933
934 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
935 if (priv->r->get_mac_rx_pause_sts(port))
936 state->pause |= MLO_PAUSE_RX;
937 if (priv->r->get_mac_tx_pause_sts(port))
938 state->pause |= MLO_PAUSE_TX;
939
940 return 1;
941 }
942
943 static void rtl838x_mac_link_down(struct phylink_config *config,
944 unsigned int mode,
945 phy_interface_t interface)
946 {
947 struct net_device *dev = container_of(config->dev, struct net_device, dev);
948 struct rtl838x_eth_priv *priv = netdev_priv(dev);
949
950 pr_info("In %s\n", __func__);
951 /* Stop TX/RX to port */
952 sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
953 }
954
955 static void rtl838x_mac_link_up(struct phylink_config *config, unsigned int mode,
956 phy_interface_t interface,
957 struct phy_device *phy)
958 {
959 struct net_device *dev = container_of(config->dev, struct net_device, dev);
960 struct rtl838x_eth_priv *priv = netdev_priv(dev);
961
962 pr_info("In %s\n", __func__);
963 /* Restart TX/RX to port */
964 sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
965 }
966
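/*
 * Program the MAC address into the switch: the upper 16 bits go into the
 * first MAC register and the lower 32 bits into the following one. On
 * RTL838x the same value is mirrored into the ALE and MAC2 register pairs.
 */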
967 static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
968 {
969 struct rtl838x_eth_priv *priv = netdev_priv(dev);
970 unsigned long flags;
971
972 spin_lock_irqsave(&priv->lock, flags);
973 pr_info("In %s\n", __func__);
974 sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
975 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
976
977 if (priv->family_id == RTL8380_FAMILY_ID) {
978 /* 2 more registers, ALE/MAC block */
979 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
980 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
981 (RTL838X_MAC_ALE + 4));
982
983 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
984 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
985 RTL838X_MAC2 + 4);
986 }
987 spin_unlock_irqrestore(&priv->lock, flags);
988 }
989
990 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
991 {
992 struct rtl838x_eth_priv *priv = netdev_priv(dev);
993 const struct sockaddr *addr = p;
994 u8 *mac = (u8 *) (addr->sa_data);
995
996 if (!is_valid_ether_addr(addr->sa_data))
997 return -EADDRNOTAVAIL;
998
999 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1000 rtl838x_set_mac_hw(dev, mac);
1001
1002 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1003 return 0;
1004 }
1005
1006 static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
1007 {
// TODO: set up EEE and the egress rate limitation
1009 return 0;
1010 }
1011
1012 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1013 {
1014 int i;
1015
if (priv->family_id == RTL8390_FAMILY_ID)
1017 return rtl8390_init_mac(priv);
1018
1019 pr_info("%s\n", __func__);
1020 /* fix timer for EEE */
1021 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1022 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1023
1024 /* Init VLAN */
1025 if (priv->id == 0x8382) {
1026 for (i = 0; i <= 28; i++)
1027 sw_w32(0, 0xd57c + i * 0x80);
1028 }
1029 if (priv->id == 0x8380) {
1030 for (i = 8; i <= 28; i++)
1031 sw_w32(0, 0xd57c + i * 0x80);
1032 }
1033 return 0;
1034 }
1035
1036 static int rtl838x_get_link_ksettings(struct net_device *ndev,
1037 struct ethtool_link_ksettings *cmd)
1038 {
1039 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1040
1041 pr_info("%s called\n", __func__);
1042 return phylink_ethtool_ksettings_get(priv->phylink, cmd);
1043 }
1044
1045 static int rtl838x_set_link_ksettings(struct net_device *ndev,
1046 const struct ethtool_link_ksettings *cmd)
1047 {
1048 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1049
1050 pr_info("%s called\n", __func__);
1051 return phylink_ethtool_ksettings_set(priv->phylink, cmd);
1052 }
1053
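/*
 * MDIO accessors. PHY addresses 24-27 on the RTL8380 and 48-49 on the
 * RTL8393 are internal SerDes ports and are accessed through dedicated
 * SerDes registers rather than the regular SMI read/write path.
 */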
1054 static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1055 {
1056 u32 val;
1057 int err;
1058 struct rtl838x_eth_priv *priv = bus->priv;
1059
1060 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1061 return rtl838x_read_sds_phy(mii_id, regnum);
1062 err = rtl838x_read_phy(mii_id, 0, regnum, &val);
1063 if (err)
1064 return err;
1065 return val;
1066 }
1067
1068 static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1069 {
1070 u32 val;
1071 int err;
1072 struct rtl838x_eth_priv *priv = bus->priv;
1073
1074 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1075 return rtl839x_read_sds_phy(mii_id, regnum);
1076
1077 err = rtl839x_read_phy(mii_id, 0, regnum, &val);
1078 if (err)
1079 return err;
1080 return val;
1081 }
1082
1083 static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
1084 int regnum, u16 value)
1085 {
1086 u32 offset = 0;
1087 struct rtl838x_eth_priv *priv = bus->priv;
1088
1089 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
1090 if (mii_id == 26)
1091 offset = 0x100;
1092 sw_w32(value, MAPLE_SDS4_FIB_REG0r + offset + (regnum << 2));
1093 return 0;
1094 }
1095 return rtl838x_write_phy(mii_id, 0, regnum, value);
1096 }
1097
1098 static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
1099 int regnum, u16 value)
1100 {
1101 struct rtl838x_eth_priv *priv = bus->priv;
1102
1103 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1104 return rtl839x_write_sds_phy(mii_id, regnum, value);
1105
1106 return rtl839x_write_phy(mii_id, 0, regnum, value);
1107 }
1108
1109 static int rtl838x_mdio_reset(struct mii_bus *bus)
1110 {
1111 pr_info("%s called\n", __func__);
1112 /* Disable MAC polling the PHY so that we can start configuration */
1113 sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);
1114
1115 /* Enable PHY control via SoC */
1116 sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);
1117
1118 // Probably should reset all PHYs here...
1119 return 0;
1120 }
1121
1122 static int rtl839x_mdio_reset(struct mii_bus *bus)
1123 {
/* BUG: The code below should take over PHY polling from the MAC before
 * configuration, but it does not work yet, so the reset is a no-op for now.
 */
return 0;

#if 0
pr_info("%s called\n", __func__);
/* Disable MAC polling the PHY so that we can start configuration */
sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
/* Disable PHY polling via SoC */
sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);

// Probably should reset all PHYs here...
return 0;
#endif
1136 }
1137
1138
1139 static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
1140 {
1141 struct device_node *mii_np;
1142 int ret;
1143
1144 pr_info("%s called\n", __func__);
1145 mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
1146
1147 if (!mii_np) {
1148 dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
1149 return -ENODEV;
1150 }
1151
1152 if (!of_device_is_available(mii_np)) {
1153 ret = -ENODEV;
1154 goto err_put_node;
1155 }
1156
1157 priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
1158 if (!priv->mii_bus) {
1159 ret = -ENOMEM;
1160 goto err_put_node;
1161 }
1162
1163 if (priv->family_id == RTL8380_FAMILY_ID) {
1164 priv->mii_bus->name = "rtl838x-eth-mdio";
1165 priv->mii_bus->read = rtl838x_mdio_read;
1166 priv->mii_bus->write = rtl838x_mdio_write;
1167 priv->mii_bus->reset = rtl838x_mdio_reset;
1168 } else {
1169 priv->mii_bus->name = "rtl839x-eth-mdio";
1170 priv->mii_bus->read = rtl839x_mdio_read;
1171 priv->mii_bus->write = rtl839x_mdio_write;
1172 priv->mii_bus->reset = rtl839x_mdio_reset;
1173 }
1174 priv->mii_bus->priv = priv;
1175 priv->mii_bus->parent = &priv->pdev->dev;
1176
1177 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
1178 ret = of_mdiobus_register(priv->mii_bus, mii_np);
1179
1180 err_put_node:
1181 of_node_put(mii_np);
1182 return ret;
1183 }
1184
1185 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
1186 {
1187 pr_info("%s called\n", __func__);
1188 if (!priv->mii_bus)
1189 return 0;
1190
1191 mdiobus_unregister(priv->mii_bus);
1192 mdiobus_free(priv->mii_bus);
1193
1194 return 0;
1195 }
1196
1197 static const struct net_device_ops rtl838x_eth_netdev_ops = {
1198 .ndo_open = rtl838x_eth_open,
1199 .ndo_stop = rtl838x_eth_stop,
1200 .ndo_start_xmit = rtl838x_eth_tx,
1201 .ndo_set_mac_address = rtl838x_set_mac_address,
1202 .ndo_validate_addr = eth_validate_addr,
1203 .ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
1204 .ndo_tx_timeout = rtl838x_eth_tx_timeout,
1205 };
1206
1207 static const struct phylink_mac_ops rtl838x_phylink_ops = {
1208 .validate = rtl838x_validate,
1209 .mac_link_state = rtl838x_mac_pcs_get_state,
1210 .mac_an_restart = rtl838x_mac_an_restart,
1211 .mac_config = rtl838x_mac_config,
1212 .mac_link_down = rtl838x_mac_link_down,
1213 .mac_link_up = rtl838x_mac_link_up,
1214 };
1215
1216 static const struct ethtool_ops rtl838x_ethtool_ops = {
1217 .get_link_ksettings = rtl838x_get_link_ksettings,
1218 .set_link_ksettings = rtl838x_set_link_ksettings,
1219 };
1220
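/*
 * Probe: map the register resource, allocate the coherent ring and
 * notification memory, detect the SoC family to pick the register table and
 * CPU port, set up the MAC address, MDIO bus and netdev, and finally create
 * the phylink instance for the CPU port.
 */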
1221 static int __init rtl838x_eth_probe(struct platform_device *pdev)
1222 {
1223 struct net_device *dev;
1224 struct device_node *dn = pdev->dev.of_node;
1225 struct rtl838x_eth_priv *priv;
1226 struct resource *res, *mem;
1227 const void *mac;
1228 phy_interface_t phy_mode;
1229 struct phylink *phylink;
1230 int err = 0;
1231
1232 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
1233 (u32)pdev, (u32)(&(pdev->dev)));
1234
1235 if (!dn) {
1236 dev_err(&pdev->dev, "No DT found\n");
1237 return -EINVAL;
1238 }
1239
1240 dev = alloc_etherdev(sizeof(struct rtl838x_eth_priv));
1241 if (!dev) {
1242 err = -ENOMEM;
1243 goto err_free;
1244 }
1245 SET_NETDEV_DEV(dev, &pdev->dev);
1246 priv = netdev_priv(dev);
1247
1248 /* obtain buffer memory space */
1249 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1250 if (res) {
1251 mem = devm_request_mem_region(&pdev->dev, res->start,
1252 resource_size(res), res->name);
1253 if (!mem) {
1254 dev_err(&pdev->dev, "cannot request memory space\n");
1255 err = -ENXIO;
1256 goto err_free;
1257 }
1258
1259 dev->mem_start = mem->start;
1260 dev->mem_end = mem->end;
1261 } else {
1262 dev_err(&pdev->dev, "cannot request IO resource\n");
1263 err = -ENXIO;
1264 goto err_free;
1265 }
1266
1267 /* Allocate buffer memory */
1268 priv->membase = dmam_alloc_coherent(&pdev->dev,
1269 sizeof(struct ring_b) + sizeof(struct notify_b),
1270 (void *)&dev->mem_start, GFP_KERNEL);
1271 if (!priv->membase) {
1272 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
1273 err = -ENOMEM;
1274 goto err_free;
1275 }
1276
1277 spin_lock_init(&priv->lock);
1278
1279 /* obtain device IRQ number */
1280 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1281 if (!res) {
dev_warn(&pdev->dev, "cannot obtain IRQ, using default 24\n");
1283 dev->irq = 24;
1284 } else {
1285 dev->irq = res->start;
1286 }
1287 dev->ethtool_ops = &rtl838x_ethtool_ops;
1288
1289 priv->id = soc_info.id;
1290 priv->family_id = soc_info.family;
1291 if (priv->id) {
1292 pr_info("Found SoC ID: %4x: %s, family %x\n",
1293 priv->id, soc_info.name, priv->family_id);
1294 } else {
1295 pr_err("Unknown chip id (%04x)\n", priv->id);
err = -ENODEV;
goto err_free;
1297 }
1298
if (priv->family_id == RTL8390_FAMILY_ID) {
1300 priv->cpu_port = RTL839X_CPU_PORT;
1301 priv->r = &rtl839x_reg;
1302 priv->port_mask = 0x3f;
1303 } else {
1304 priv->cpu_port = RTL838X_CPU_PORT;
1305 priv->r = &rtl838x_reg;
1306 priv->port_mask = 0x1f;
1307 }
1308
1309 rtl8380_init_mac(priv);
1310
1311 /* try to get mac address in the following order:
1312 * 1) from device tree data
1313 * 2) from internal registers set by bootloader
1314 */
1315 mac = of_get_mac_address(pdev->dev.of_node);
1316 if (!IS_ERR(mac)) {
1317 memcpy(dev->dev_addr, mac, ETH_ALEN);
1318 rtl838x_set_mac_hw(dev, (u8 *)mac);
1319 } else {
1320 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
1321 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
1322 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
1323 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
1324 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
1325 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
1326 }
1327 /* if the address is invalid, use a random value */
1328 if (!is_valid_ether_addr(dev->dev_addr)) {
1329 struct sockaddr sa = { AF_UNSPEC };
1330
1331 netdev_warn(dev, "Invalid MAC address, using random\n");
1332 eth_hw_addr_random(dev);
1333 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
1334 if (rtl838x_set_mac_address(dev, &sa))
1335 netdev_warn(dev, "Failed to set MAC address.\n");
1336 }
1337 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
1338 sw_r32(priv->r->mac + 4));
1339 strcpy(dev->name, "eth%d");
1340 dev->netdev_ops = &rtl838x_eth_netdev_ops;
1341 priv->pdev = pdev;
1342 priv->netdev = dev;
1343
1344 err = rtl838x_mdio_init(priv);
1345 if (err)
1346 goto err_free;
1347
1348 err = register_netdev(dev);
1349 if (err)
1350 goto err_free;
1351
1352 netif_napi_add(dev, &priv->napi, rtl838x_poll_rx, 64);
1353 platform_set_drvdata(pdev, dev);
1354
1355 phy_mode = of_get_phy_mode(dn);
1356 if (phy_mode < 0) {
1357 dev_err(&pdev->dev, "incorrect phy-mode\n");
1358 err = -EINVAL;
1359 goto err_free;
1360 }
1361 priv->phylink_config.dev = &dev->dev;
1362 priv->phylink_config.type = PHYLINK_NETDEV;
1363
1364 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
1365 phy_mode, &rtl838x_phylink_ops);
1366 if (IS_ERR(phylink)) {
1367 err = PTR_ERR(phylink);
1368 goto err_free;
1369 }
1370 priv->phylink = phylink;
1371
1372 return 0;
1373
1374 err_free:
1375 pr_err("Error setting up netdev, freeing it again.\n");
1376 free_netdev(dev);
1377 return err;
1378 }
1379
1380 static int rtl838x_eth_remove(struct platform_device *pdev)
1381 {
1382 struct net_device *dev = platform_get_drvdata(pdev);
1383 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1384
1385 if (dev) {
1386 pr_info("Removing platform driver for rtl838x-eth\n");
1387 rtl838x_mdio_remove(priv);
1388 rtl838x_hw_stop(priv);
1389 netif_stop_queue(dev);
1390 netif_napi_del(&priv->napi);
1391 unregister_netdev(dev);
1392 free_netdev(dev);
1393 }
1394 return 0;
1395 }
1396
1397 static const struct of_device_id rtl838x_eth_of_ids[] = {
1398 { .compatible = "realtek,rtl838x-eth"},
1399 { /* sentinel */ }
1400 };
1401 MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
1402
1403 static struct platform_driver rtl838x_eth_driver = {
1404 .probe = rtl838x_eth_probe,
1405 .remove = rtl838x_eth_remove,
1406 .driver = {
1407 .name = "rtl838x-eth",
1408 .pm = NULL,
1409 .of_match_table = rtl838x_eth_of_ids,
1410 },
1411 };
1412
1413 module_platform_driver(rtl838x_eth_driver);
1414
1415 MODULE_AUTHOR("B. Koblitz");
1416 MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
1417 MODULE_LICENSE("GPL");