bmips: bcm6348-enet: register emac driver from iudma
openwrt/staging/pepe2k.git: target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6348-enet.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * BCM6348 Ethernet Controller Driver
4 *
5 * Copyright (C) 2020 Álvaro Fernández Rojas <noltari@gmail.com>
6 * Copyright (C) 2015 Jonas Gorski <jonas.gorski@gmail.com>
7 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
8 */
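/*
 * Note on structure: this file provides two platform drivers, "bcm6348-iudma"
 * for the shared IUDMA controller and "bcm6348-emac" for the Ethernet MACs
 * that sit on top of it. The IUDMA probe registers the EMAC driver as its
 * last step (see bcm6348_iudma_drivers_register()), and the EMAC probe
 * defers until the IUDMA drvdata is available, so a MAC only comes up once
 * the shared DMA engine has been initialised.
 */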
9
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/etherdevice.h>
15 #include <linux/if_vlan.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/of_address.h>
20 #include <linux/of_clk.h>
21 #include <linux/of_irq.h>
22 #include <linux/of_mdio.h>
23 #include <linux/of_net.h>
24 #include <linux/of_platform.h>
25 #include <linux/phy.h>
26 #include <linux/platform_device.h>
27 #include <linux/reset.h>
28
29 /* DMA channels */
30 #define DMA_CHAN_WIDTH 0x10
31
32 /* Controller Configuration Register */
33 #define DMA_CFG_REG 0x0
34 #define DMA_CFG_EN_SHIFT 0
35 #define DMA_CFG_EN_MASK (1 << DMA_CFG_EN_SHIFT)
36 #define DMA_CFG_FLOWCH_MASK(x) (1 << (((x) >> 1) + 1))
37
38 /* Flow Control Descriptor Low Threshold register */
39 #define DMA_FLOWCL_REG(x) (0x4 + (x) * 6)
40
41 /* Flow Control Descriptor High Threshold register */
42 #define DMA_FLOWCH_REG(x) (0x8 + (x) * 6)
43
45 /* Flow Control Descriptor Buffer Alloc Threshold register */
45 #define DMA_BUFALLOC_REG(x) (0xc + (x) * 6)
46 #define DMA_BUFALLOC_FORCE_SHIFT 31
47 #define DMA_BUFALLOC_FORCE_MASK (1 << DMA_BUFALLOC_FORCE_SHIFT)
48
49 /* Channel Configuration register */
50 #define DMAC_CHANCFG_REG 0x0
51 #define DMAC_CHANCFG_EN_SHIFT 0
52 #define DMAC_CHANCFG_EN_MASK (1 << DMAC_CHANCFG_EN_SHIFT)
53 #define DMAC_CHANCFG_PKTHALT_SHIFT 1
54 #define DMAC_CHANCFG_PKTHALT_MASK (1 << DMAC_CHANCFG_PKTHALT_SHIFT)
55 #define DMAC_CHANCFG_BUFHALT_SHIFT 2
56 #define DMAC_CHANCFG_BUFHALT_MASK (1 << DMAC_CHANCFG_BUFHALT_SHIFT)
57 #define DMAC_CHANCFG_CHAINING_SHIFT 2
58 #define DMAC_CHANCFG_CHAINING_MASK (1 << DMAC_CHANCFG_CHAINING_SHIFT)
59 #define DMAC_CHANCFG_WRAP_EN_SHIFT 3
60 #define DMAC_CHANCFG_WRAP_EN_MASK (1 << DMAC_CHANCFG_WRAP_EN_SHIFT)
61 #define DMAC_CHANCFG_FLOWC_EN_SHIFT 4
62 #define DMAC_CHANCFG_FLOWC_EN_MASK (1 << DMAC_CHANCFG_FLOWC_EN_SHIFT)
63
64 /* Interrupt Control/Status register */
65 #define DMAC_IR_REG 0x4
66 #define DMAC_IR_BUFDONE_MASK (1 << 0)
67 #define DMAC_IR_PKTDONE_MASK (1 << 1)
68 #define DMAC_IR_NOTOWNER_MASK (1 << 2)
69
70 /* Interrupt Mask register */
71 #define DMAC_IRMASK_REG 0x8
72
73 /* Maximum Burst Length */
74 #define DMAC_MAXBURST_REG 0xc
75
76 /* Ring Start Address register */
77 #define DMAS_RSTART_REG 0x0
78
79 /* State Ram Word 2 */
80 #define DMAS_SRAM2_REG 0x4
81
82 /* State Ram Word 3 */
83 #define DMAS_SRAM3_REG 0x8
84
85 /* State Ram Word 4 */
86 #define DMAS_SRAM4_REG 0xc
87
88 struct bcm6348_iudma_desc {
89 u32 len_stat;
90 u32 address;
91 };
92
93 /* control */
94 #define DMADESC_LENGTH_SHIFT 16
95 #define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT)
96 #define DMADESC_OWNER_MASK (1 << 15)
97 #define DMADESC_EOP_MASK (1 << 14)
98 #define DMADESC_SOP_MASK (1 << 13)
99 #define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
100 #define DMADESC_WRAP_MASK (1 << 12)
101
102 /* status */
103 #define DMADESC_UNDER_MASK (1 << 9)
104 #define DMADESC_APPEND_CRC (1 << 8)
105 #define DMADESC_OVSIZE_MASK (1 << 4)
106 #define DMADESC_RXER_MASK (1 << 2)
107 #define DMADESC_CRC_MASK (1 << 1)
108 #define DMADESC_OV_MASK (1 << 0)
109 #define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \
110 DMADESC_OVSIZE_MASK | \
111 DMADESC_RXER_MASK | \
112 DMADESC_CRC_MASK | \
113 DMADESC_OV_MASK)
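/*
 * Descriptor ownership convention used throughout this driver: software
 * fills in the buffer address, then sets DMADESC_OWNER_MASK (together with
 * the length and, on the last ring entry, DMADESC_WRAP_MASK) to hand the
 * descriptor to the hardware, which clears the bit once it is done with it.
 * A minimal sketch of how an rx descriptor is armed, mirroring
 * bcm6348_emac_refill_rx() below:
 *
 *	desc->address = dma_map_single(dev, skb->data, size, DMA_FROM_DEVICE);
 *	len_stat = (size << DMADESC_LENGTH_SHIFT) | DMADESC_OWNER_MASK;
 *	wmb();
 *	desc->len_stat = len_stat;
 *
 * The wmb() makes the buffer address visible to the device before ownership
 * is transferred.
 */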
114
115 struct bcm6348_iudma {
116 void __iomem *dma_base;
117 void __iomem *dma_chan;
118 void __iomem *dma_sram;
119
120 spinlock_t dma_base_lock;
121
122 struct clk **clock;
123 unsigned int num_clocks;
124
125 struct reset_control **reset;
126 unsigned int num_resets;
127
128 unsigned int dma_channels;
129 };
130
131 int bcm6348_iudma_drivers_register(struct platform_device *pdev);
132
133 static inline u32 dma_readl(struct bcm6348_iudma *iudma, u32 off)
134 {
135 u32 val;
136
137 spin_lock(&iudma->dma_base_lock);
138 val = __raw_readl(iudma->dma_base + off);
139 spin_unlock(&iudma->dma_base_lock);
140
141 return val;
142 }
143
144 static inline void dma_writel(struct bcm6348_iudma *iudma, u32 val, u32 off)
145 {
146 spin_lock(&iudma->dma_base_lock);
147 __raw_writel(val, iudma->dma_base + off);
148 spin_unlock(&iudma->dma_base_lock);
149 }
150
151 static inline u32 dmac_readl(struct bcm6348_iudma *iudma, u32 off, int chan)
152 {
153 return __raw_readl(iudma->dma_chan + chan * DMA_CHAN_WIDTH + off);
154 }
155
156 static inline void dmac_writel(struct bcm6348_iudma *iudma, u32 val, u32 off,
157 int chan)
158 {
159 __raw_writel(val, iudma->dma_chan + chan * DMA_CHAN_WIDTH + off);
160 }
161
162 static inline void dmas_writel(struct bcm6348_iudma *iudma, u32 val, u32 off,
163 int chan)
164 {
165 __raw_writel(val, iudma->dma_sram + chan * DMA_CHAN_WIDTH + off);
166 }
167
168 static void bcm6348_iudma_chan_stop(struct bcm6348_iudma *iudma, int chan)
169 {
170 int limit = 1000;
171
172 dmac_writel(iudma, 0, DMAC_CHANCFG_REG, chan);
173
174 do {
175 u32 val;
176
177 val = dmac_readl(iudma, DMAC_CHANCFG_REG, chan);
178 if (!(val & DMAC_CHANCFG_EN_MASK))
179 break;
180
181 udelay(1);
182 } while (limit--);
183 }
184
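/*
 * Illustrative device tree fragment for the IUDMA node, inferred from the
 * properties this probe reads (reg names "dma", "dma-channels" and
 * "dma-sram", a "dma-channels" count, plus optional clocks and resets).
 * Addresses and counts below are placeholders, not taken from a real board:
 *
 *	iudma: dma-controller@fffe2400 {
 *		compatible = "brcm,bcm6348-iudma";
 *		reg = <0xfffe2400 0x30>,
 *		      <0xfffe2440 0x80>,
 *		      <0xfffe2500 0x80>;
 *		reg-names = "dma", "dma-channels", "dma-sram";
 *		dma-channels = <8>;
 *	};
 */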
185 static int bcm6348_iudma_probe(struct platform_device *pdev)
186 {
187 struct device *dev = &pdev->dev;
188 struct device_node *node = dev->of_node;
189 struct bcm6348_iudma *iudma;
190 unsigned i;
191 int num_resets;
192 int ret;
193
194 iudma = devm_kzalloc(dev, sizeof(*iudma), GFP_KERNEL);
195 if (!iudma)
196 return -ENOMEM;
197
198 if (of_property_read_u32(node, "dma-channels", &iudma->dma_channels))
199 return -ENODEV;
200
201 iudma->dma_base = devm_platform_ioremap_resource_byname(pdev, "dma");
202 if (IS_ERR_OR_NULL(iudma->dma_base))
203 return PTR_ERR(iudma->dma_base);
204
205 iudma->dma_chan = devm_platform_ioremap_resource_byname(pdev,
206 "dma-channels");
207 if (IS_ERR_OR_NULL(iudma->dma_chan))
208 return PTR_ERR(iudma->dma_chan);
209
210 iudma->dma_sram = devm_platform_ioremap_resource_byname(pdev,
211 "dma-sram");
212 if (IS_ERR_OR_NULL(iudma->dma_sram))
213 return PTR_ERR(iudma->dma_sram);
214
215 iudma->num_clocks = of_clk_get_parent_count(node);
216 if (iudma->num_clocks) {
217 iudma->clock = devm_kcalloc(dev, iudma->num_clocks,
218 sizeof(struct clk *), GFP_KERNEL);
219 if (IS_ERR_OR_NULL(iudma->clock))
220 return PTR_ERR(iudma->clock);
221 }
222 for (i = 0; i < iudma->num_clocks; i++) {
223 iudma->clock[i] = of_clk_get(node, i);
224 if (IS_ERR_OR_NULL(iudma->clock[i])) {
225 dev_err(dev, "error getting iudma clock %d\n", i);
226 return PTR_ERR(iudma->clock[i]);
227 }
228
229 ret = clk_prepare_enable(iudma->clock[i]);
230 if (ret) {
231 dev_err(dev, "error enabling iudma clock %d\n", i);
232 return ret;
233 }
234 }
235
236 num_resets = of_count_phandle_with_args(node, "resets",
237 "#reset-cells");
238 if (num_resets > 0)
239 iudma->num_resets = num_resets;
240 else
241 iudma->num_resets = 0;
242 if (iudma->num_resets) {
243 iudma->reset = devm_kcalloc(dev, iudma->num_resets,
244 sizeof(struct reset_control *),
245 GFP_KERNEL);
246 if (IS_ERR_OR_NULL(iudma->reset))
247 return PTR_ERR(iudma->reset);
248 }
249 for (i = 0; i < iudma->num_resets; i++) {
250 iudma->reset[i] = devm_reset_control_get_by_index(dev, i);
251 if (IS_ERR_OR_NULL(iudma->reset[i])) {
252 dev_err(dev, "error getting iudma reset %d\n", i);
253 return PTR_ERR(iudma->reset[i]);
254 }
255
256 ret = reset_control_reset(iudma->reset[i]);
257 if (ret) {
258 dev_err(dev, "error performing iudma reset %d\n", i);
259 return ret;
260 }
261 }
262
263 dma_writel(iudma, 0, DMA_CFG_REG);
264 for (i = 0; i < iudma->dma_channels; i++)
265 bcm6348_iudma_chan_stop(iudma, i);
266 dma_writel(iudma, DMA_CFG_EN_MASK, DMA_CFG_REG);
267
268 spin_lock_init(&iudma->dma_base_lock);
269
270 dev_info(dev, "bcm6348-iudma @ 0x%px\n", iudma->dma_base);
271
272 platform_set_drvdata(pdev, iudma);
273
274 return bcm6348_iudma_drivers_register(pdev);
275 }
276
277 static const struct of_device_id bcm6348_iudma_of_match[] = {
278 { .compatible = "brcm,bcm6338-iudma", },
279 { .compatible = "brcm,bcm6348-iudma", },
280 { .compatible = "brcm,bcm6358-iudma", },
281 { /* sentinel */ },
282 };
283
284 static struct platform_driver bcm6348_iudma_driver = {
285 .driver = {
286 .name = "bcm6348-iudma",
287 .of_match_table = of_match_ptr(bcm6348_iudma_of_match),
288 },
289 .probe = bcm6348_iudma_probe,
290 };
291 builtin_platform_driver(bcm6348_iudma_driver);
292
293 /*
294 * BCM6348 Ethernet MACs
295 */
296
297 /* MTU */
298 #define ENET_MAX_MTU 2046
299
300 #define ENET_TAG_SIZE 6
301 #define ENET_MTU_OVERHEAD (VLAN_ETH_HLEN + VLAN_HLEN + \
302 ENET_TAG_SIZE)
303
304 /* Default number of descriptors */
305 #define ENET_DEF_RX_DESC 64
306 #define ENET_DEF_TX_DESC 32
307 #define ENET_DEF_CPY_BREAK 128
308
309 /* Maximum DMA burst length (in 4-byte units) */
310 #define ENET_DMA_MAXBURST 8
311
312 /* Receiver Configuration register */
313 #define ENET_RXCFG_REG 0x0
314 #define ENET_RXCFG_ALLMCAST_SHIFT 1
315 #define ENET_RXCFG_ALLMCAST_MASK (1 << ENET_RXCFG_ALLMCAST_SHIFT)
316 #define ENET_RXCFG_PROMISC_SHIFT 3
317 #define ENET_RXCFG_PROMISC_MASK (1 << ENET_RXCFG_PROMISC_SHIFT)
318 #define ENET_RXCFG_LOOPBACK_SHIFT 4
319 #define ENET_RXCFG_LOOPBACK_MASK (1 << ENET_RXCFG_LOOPBACK_SHIFT)
320 #define ENET_RXCFG_ENFLOW_SHIFT 5
321 #define ENET_RXCFG_ENFLOW_MASK (1 << ENET_RXCFG_ENFLOW_SHIFT)
322
323 /* Receive Maximum Length register */
324 #define ENET_RXMAXLEN_REG 0x4
325 #define ENET_RXMAXLEN_SHIFT 0
326 #define ENET_RXMAXLEN_MASK (0x7ff << ENET_RXMAXLEN_SHIFT)
327
328 /* Transmit Maximum Length register */
329 #define ENET_TXMAXLEN_REG 0x8
330 #define ENET_TXMAXLEN_SHIFT 0
331 #define ENET_TXMAXLEN_MASK (0x7ff << ENET_TXMAXLEN_SHIFT)
332
333 /* MII Status/Control register */
334 #define ENET_MIISC_REG 0x10
335 #define ENET_MIISC_MDCFREQDIV_SHIFT 0
336 #define ENET_MIISC_MDCFREQDIV_MASK (0x7f << ENET_MIISC_MDCFREQDIV_SHIFT)
337 #define ENET_MIISC_PREAMBLEEN_SHIFT 7
338 #define ENET_MIISC_PREAMBLEEN_MASK (1 << ENET_MIISC_PREAMBLEEN_SHIFT)
339
340 /* MII Data register */
341 #define ENET_MIID_REG 0x14
342 #define ENET_MIID_DATA_SHIFT 0
343 #define ENET_MIID_DATA_MASK (0xffff << ENET_MIID_DATA_SHIFT)
344 #define ENET_MIID_TA_SHIFT 16
345 #define ENET_MIID_TA_MASK (0x3 << ENET_MIID_TA_SHIFT)
346 #define ENET_MIID_REG_SHIFT 18
347 #define ENET_MIID_REG_MASK (0x1f << ENET_MIID_REG_SHIFT)
348 #define ENET_MIID_PHY_SHIFT 23
349 #define ENET_MIID_PHY_MASK (0x1f << ENET_MIID_PHY_SHIFT)
350 #define ENET_MIID_OP_SHIFT 28
351 #define ENET_MIID_OP_WRITE (0x5 << ENET_MIID_OP_SHIFT)
352 #define ENET_MIID_OP_READ (0x6 << ENET_MIID_OP_SHIFT)
353
354 /* Ethernet Interrupt Mask register */
355 #define ENET_IRMASK_REG 0x18
356
357 /* Ethernet Interrupt register */
358 #define ENET_IR_REG 0x1c
359 #define ENET_IR_MII BIT(0)
360 #define ENET_IR_MIB BIT(1)
361 #define ENET_IR_FLOWC BIT(2)
362
363 /* Ethernet Control register */
364 #define ENET_CTL_REG 0x2c
365 #define ENET_CTL_ENABLE_SHIFT 0
366 #define ENET_CTL_ENABLE_MASK (1 << ENET_CTL_ENABLE_SHIFT)
367 #define ENET_CTL_DISABLE_SHIFT 1
368 #define ENET_CTL_DISABLE_MASK (1 << ENET_CTL_DISABLE_SHIFT)
369 #define ENET_CTL_SRESET_SHIFT 2
370 #define ENET_CTL_SRESET_MASK (1 << ENET_CTL_SRESET_SHIFT)
371 #define ENET_CTL_EPHYSEL_SHIFT 3
372 #define ENET_CTL_EPHYSEL_MASK (1 << ENET_CTL_EPHYSEL_SHIFT)
373
374 /* Transmit Control register */
375 #define ENET_TXCTL_REG 0x30
376 #define ENET_TXCTL_FD_SHIFT 0
377 #define ENET_TXCTL_FD_MASK (1 << ENET_TXCTL_FD_SHIFT)
378
379 /* Transmit Watermark register */
380 #define ENET_TXWMARK_REG 0x34
381 #define ENET_TXWMARK_WM_SHIFT 0
382 #define ENET_TXWMARK_WM_MASK (0x3f << ENET_TXWMARK_WM_SHIFT)
383
384 /* MIB Control register */
385 #define ENET_MIBCTL_REG 0x38
386 #define ENET_MIBCTL_RDCLEAR_SHIFT 0
387 #define ENET_MIBCTL_RDCLEAR_MASK (1 << ENET_MIBCTL_RDCLEAR_SHIFT)
388
389 /* Perfect Match Data Low register */
390 #define ENET_PML_REG(x) (0x58 + (x) * 8)
391 #define ENET_PMH_REG(x) (0x5c + (x) * 8)
392 #define ENET_PMH_DATAVALID_SHIFT 16
393 #define ENET_PMH_DATAVALID_MASK (1 << ENET_PMH_DATAVALID_SHIFT)
394
395 /* MIB register */
396 #define ENET_MIB_REG(x) (0x200 + (x) * 4)
397 #define ENET_MIB_REG_COUNT 55
398
399 /*
400 * TX transmit threshold (in 4-byte units). The FIFO is 256 bytes, so the
401 * value must be low enough that a DMA transfer of the above burst length
402 * cannot overflow the FIFO.
403 */
404 #define ENET_TX_FIFO_TRESH 32
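
/*
 * For reference (assuming the threshold is counted in the same 4-byte units
 * as the burst length): 32 * 4 = 128 bytes, i.e. half of the 256-byte FIFO,
 * so even a full ENET_DMA_MAXBURST transfer of 8 * 4 = 32 bytes leaves the
 * FIFO well short of overflowing.
 */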
405
406 struct bcm6348_emac {
407 struct bcm6348_iudma *iudma;
408 void __iomem *base;
409
410 struct clk **clock;
411 unsigned int num_clocks;
412
413 struct reset_control **reset;
414 unsigned int num_resets;
415
416 int copybreak;
417
418 int irq_rx;
419 int irq_tx;
420
421 /* hw view of rx & tx dma ring */
422 dma_addr_t rx_desc_dma;
423 dma_addr_t tx_desc_dma;
424
425 /* allocated size (in bytes) for rx & tx dma ring */
426 unsigned int rx_desc_alloc_size;
427 unsigned int tx_desc_alloc_size;
428
429 struct napi_struct napi;
430
431 /* dma channel id for rx */
432 int rx_chan;
433
434 /* number of dma desc in rx ring */
435 int rx_ring_size;
436
437 /* cpu view of rx dma ring */
438 struct bcm6348_iudma_desc *rx_desc_cpu;
439
440 /* current number of armed descriptors given to hardware for rx */
441 int rx_desc_count;
442
443 /* next rx descriptor to fetch from hardware */
444 int rx_curr_desc;
445
446 /* next dirty rx descriptor to refill */
447 int rx_dirty_desc;
448
449 /* size of allocated rx skbs */
450 unsigned int rx_skb_size;
451
452 /* list of skb given to hw for rx */
453 struct sk_buff **rx_skb;
454
455 /* used when rx skb allocation fails, so we can defer the rx queue
456 * refill */
457 struct timer_list rx_timeout;
458
459 /* lock rx_timeout against rx normal operation */
460 spinlock_t rx_lock;
461
462 /* dma channel id for tx */
463 int tx_chan;
464
465 /* number of dma desc in tx ring */
466 int tx_ring_size;
467
468 /* cpu view of tx dma ring */
469 struct bcm6348_iudma_desc *tx_desc_cpu;
470
471 /* number of available descriptors for tx */
472 int tx_desc_count;
473
474 /* next available tx descriptor */
475 int tx_curr_desc;
476
477 /* next dirty tx descriptor to reclaim */
478 int tx_dirty_desc;
479
480 /* list of skb given to hw for tx */
481 struct sk_buff **tx_skb;
482
483 /* lock used by tx reclaim and xmit */
484 spinlock_t tx_lock;
485
486 /* network device reference */
487 struct net_device *net_dev;
488
489 /* platform device reference */
490 struct platform_device *pdev;
491
492 /* external mii bus */
493 bool ext_mii;
494
495 /* phy */
496 int old_link;
497 int old_duplex;
498 int old_pause;
499 };
500
501 static inline void emac_writel(struct bcm6348_emac *emac, u32 val, u32 off)
502 {
503 __raw_writel(val, emac->base + off);
504 }
505
506 static inline u32 emac_readl(struct bcm6348_emac *emac, u32 off)
507 {
508 return __raw_readl(emac->base + off);
509 }
510
511 /*
512 * refill rx queue
513 */
514 static int bcm6348_emac_refill_rx(struct net_device *ndev)
515 {
516 struct bcm6348_emac *emac = netdev_priv(ndev);
517 struct bcm6348_iudma *iudma = emac->iudma;
518 struct platform_device *pdev = emac->pdev;
519 struct device *dev = &pdev->dev;
520
521 while (emac->rx_desc_count < emac->rx_ring_size) {
522 struct bcm6348_iudma_desc *desc;
523 struct sk_buff *skb;
524 dma_addr_t p;
525 int desc_idx;
526 u32 len_stat;
527
528 desc_idx = emac->rx_dirty_desc;
529 desc = &emac->rx_desc_cpu[desc_idx];
530
531 if (!emac->rx_skb[desc_idx]) {
532 skb = netdev_alloc_skb(ndev, emac->rx_skb_size);
533 if (!skb)
534 break;
535 emac->rx_skb[desc_idx] = skb;
536 p = dma_map_single(dev, skb->data, emac->rx_skb_size,
537 DMA_FROM_DEVICE);
538 desc->address = p;
539 }
540
541 len_stat = emac->rx_skb_size << DMADESC_LENGTH_SHIFT;
542 len_stat |= DMADESC_OWNER_MASK;
543 if (emac->rx_dirty_desc == emac->rx_ring_size - 1) {
544 len_stat |= DMADESC_WRAP_MASK;
545 emac->rx_dirty_desc = 0;
546 } else {
547 emac->rx_dirty_desc++;
548 }
549 wmb();
550 desc->len_stat = len_stat;
551
552 emac->rx_desc_count++;
553
554 /* tell dma engine we allocated one buffer */
555 dma_writel(iudma, 1, DMA_BUFALLOC_REG(emac->rx_chan));
556 }
557
558 /* If rx ring is still empty, set a timer to try allocating
559 * again at a later time. */
560 if (emac->rx_desc_count == 0 && netif_running(ndev)) {
561 dev_warn(dev, "unable to refill rx ring\n");
562 emac->rx_timeout.expires = jiffies + HZ;
563 add_timer(&emac->rx_timeout);
564 }
565
566 return 0;
567 }
568
569 /*
570 * timer callback to defer refill rx queue in case we're OOM
571 */
572 static void bcm6348_emac_refill_rx_timer(struct timer_list *t)
573 {
574 struct bcm6348_emac *emac = from_timer(emac, t, rx_timeout);
575 struct net_device *ndev = emac->net_dev;
576
577 spin_lock(&emac->rx_lock);
578 bcm6348_emac_refill_rx(ndev);
579 spin_unlock(&emac->rx_lock);
580 }
581
582 /*
583 * extract packet from rx queue
584 */
585 static int bcm6348_emac_receive_queue(struct net_device *ndev, int budget)
586 {
587 struct bcm6348_emac *emac = netdev_priv(ndev);
588 struct bcm6348_iudma *iudma = emac->iudma;
589 struct platform_device *pdev = emac->pdev;
590 struct device *dev = &pdev->dev;
591 int processed = 0;
592
593 /* don't scan the ring further than the number of refilled
594 * descriptors */
595 if (budget > emac->rx_desc_count)
596 budget = emac->rx_desc_count;
597
598 do {
599 struct bcm6348_iudma_desc *desc;
600 struct sk_buff *skb;
601 int desc_idx;
602 u32 len_stat;
603 unsigned int len;
604
605 desc_idx = emac->rx_curr_desc;
606 desc = &emac->rx_desc_cpu[desc_idx];
607
608 /* make sure we actually read the descriptor status at
609 * each loop */
610 rmb();
611
612 len_stat = desc->len_stat;
613
614 /* break if dma ownership belongs to hw */
615 if (len_stat & DMADESC_OWNER_MASK)
616 break;
617
618 processed++;
619 emac->rx_curr_desc++;
620 if (emac->rx_curr_desc == emac->rx_ring_size)
621 emac->rx_curr_desc = 0;
622 emac->rx_desc_count--;
623
624 /* if the packet does not have start of packet _and_
625 * end of packet flag set, then just recycle it */
626 if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
627 ndev->stats.rx_dropped++;
628 continue;
629 }
630
631 /* valid packet */
632 skb = emac->rx_skb[desc_idx];
633 len = (len_stat & DMADESC_LENGTH_MASK)
634 >> DMADESC_LENGTH_SHIFT;
635 /* don't include FCS */
636 len -= 4;
637
638 if (len < emac->copybreak) {
639 struct sk_buff *nskb;
640
641 nskb = napi_alloc_skb(&emac->napi, len);
642 if (!nskb) {
643 /* forget packet, just rearm desc */
644 ndev->stats.rx_dropped++;
645 continue;
646 }
647
648 dma_sync_single_for_cpu(dev, desc->address,
649 len, DMA_FROM_DEVICE);
650 memcpy(nskb->data, skb->data, len);
651 dma_sync_single_for_device(dev, desc->address,
652 len, DMA_FROM_DEVICE);
653 skb = nskb;
654 } else {
655 dma_unmap_single(dev, desc->address,
656 emac->rx_skb_size, DMA_FROM_DEVICE);
657 emac->rx_skb[desc_idx] = NULL;
658 }
659
660 skb_put(skb, len);
661 skb->protocol = eth_type_trans(skb, ndev);
662 ndev->stats.rx_packets++;
663 ndev->stats.rx_bytes += len;
664 netif_receive_skb(skb);
665 } while (--budget > 0);
666
667 if (processed || !emac->rx_desc_count) {
668 bcm6348_emac_refill_rx(ndev);
669
670 /* kick rx dma */
671 dmac_writel(iudma, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG,
672 emac->rx_chan);
673 }
674
675 return processed;
676 }
677
678 /*
679 * try to or force reclaim of transmitted buffers
680 */
681 static int bcm6348_emac_tx_reclaim(struct net_device *ndev, int force)
682 {
683 struct bcm6348_emac *emac = netdev_priv(ndev);
684 struct platform_device *pdev = emac->pdev;
685 struct device *dev = &pdev->dev;
686 int released = 0;
687
688 while (emac->tx_desc_count < emac->tx_ring_size) {
689 struct bcm6348_iudma_desc *desc;
690 struct sk_buff *skb;
691
692 /* We run in a bh and fight against start_xmit, which
693 * is called with bh disabled */
694 spin_lock(&emac->tx_lock);
695
696 desc = &emac->tx_desc_cpu[emac->tx_dirty_desc];
697
698 if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
699 spin_unlock(&emac->tx_lock);
700 break;
701 }
702
703 /* ensure the other fields of the descriptor were not read
704 * before we checked ownership */
705 rmb();
706
707 skb = emac->tx_skb[emac->tx_dirty_desc];
708 emac->tx_skb[emac->tx_dirty_desc] = NULL;
709 dma_unmap_single(dev, desc->address, skb->len, DMA_TO_DEVICE);
710
711 emac->tx_dirty_desc++;
712 if (emac->tx_dirty_desc == emac->tx_ring_size)
713 emac->tx_dirty_desc = 0;
714 emac->tx_desc_count++;
715
716 spin_unlock(&emac->tx_lock);
717
718 if (desc->len_stat & DMADESC_UNDER_MASK)
719 ndev->stats.tx_errors++;
720
721 dev_kfree_skb(skb);
722 released++;
723 }
724
725 if (netif_queue_stopped(ndev) && released)
726 netif_wake_queue(ndev);
727
728 return released;
729 }
730
731 static int bcm6348_emac_poll(struct napi_struct *napi, int budget)
732 {
733 struct bcm6348_emac *emac = container_of(napi, struct bcm6348_emac,
734 napi);
735 struct bcm6348_iudma *iudma = emac->iudma;
736 struct net_device *ndev = emac->net_dev;
737 int rx_work_done;
738
739 /* ack interrupts */
740 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG,
741 emac->rx_chan);
742 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG,
743 emac->tx_chan);
744
745 /* reclaim sent skb */
746 bcm6348_emac_tx_reclaim(ndev, 0);
747
748 spin_lock(&emac->rx_lock);
749 rx_work_done = bcm6348_emac_receive_queue(ndev, budget);
750 spin_unlock(&emac->rx_lock);
751
752 if (rx_work_done >= budget) {
753 /* rx queue is not yet empty/clean */
754 return rx_work_done;
755 }
756
757 /* no more packet in rx/tx queue, remove device from poll
758 * queue */
759 napi_complete_done(napi, rx_work_done);
760
761 /* restore rx/tx interrupt */
762 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IRMASK_REG,
763 emac->rx_chan);
764 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IRMASK_REG,
765 emac->tx_chan);
766
767 return rx_work_done;
768 }
769
770 /*
771 * emac interrupt handler
772 */
773 static irqreturn_t bcm6348_emac_isr_mac(int irq, void *dev_id)
774 {
775 struct net_device *ndev = dev_id;
776 struct bcm6348_emac *emac = netdev_priv(ndev);
777 u32 stat;
778
779 stat = emac_readl(emac, ENET_IR_REG);
780 if (!(stat & ENET_IR_MIB))
781 return IRQ_NONE;
782
783 /* clear & mask interrupt */
784 emac_writel(emac, ENET_IR_MIB, ENET_IR_REG);
785 emac_writel(emac, 0, ENET_IRMASK_REG);
786
787 return IRQ_HANDLED;
788 }
789
790 /*
791 * rx/tx dma interrupt handler
792 */
793 static irqreturn_t bcm6348_emac_isr_dma(int irq, void *dev_id)
794 {
795 struct net_device *ndev = dev_id;
796 struct bcm6348_emac *emac = netdev_priv(ndev);
797 struct bcm6348_iudma *iudma = emac->iudma;
798
799 /* mask rx/tx interrupts */
800 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan);
801 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan);
802
803 napi_schedule(&emac->napi);
804
805 return IRQ_HANDLED;
806 }
807
808 /*
809 * tx request callback
810 */
811 static netdev_tx_t bcm6348_emac_start_xmit(struct sk_buff *skb,
812 struct net_device *ndev)
813 {
814 struct bcm6348_emac *emac = netdev_priv(ndev);
815 struct bcm6348_iudma *iudma = emac->iudma;
816 struct platform_device *pdev = emac->pdev;
817 struct device *dev = &pdev->dev;
818 struct bcm6348_iudma_desc *desc;
819 u32 len_stat;
820 netdev_tx_t ret;
821
822 /* lock against tx reclaim */
823 spin_lock(&emac->tx_lock);
824
825 /* make sure the tx hw queue is not full; this should not happen
826 * since we stop the queue before that is the case */
827 if (unlikely(!emac->tx_desc_count)) {
828 netif_stop_queue(ndev);
829 dev_err(dev, "xmit called with no tx desc available?\n");
830 ret = NETDEV_TX_BUSY;
831 goto out_unlock;
832 }
833
834 /* point to the next available desc */
835 desc = &emac->tx_desc_cpu[emac->tx_curr_desc];
836 emac->tx_skb[emac->tx_curr_desc] = skb;
837
838 /* fill descriptor */
839 desc->address = dma_map_single(dev, skb->data, skb->len,
840 DMA_TO_DEVICE);
841
842 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
843 len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |
844 DMADESC_OWNER_MASK;
845
846 emac->tx_curr_desc++;
847 if (emac->tx_curr_desc == emac->tx_ring_size) {
848 emac->tx_curr_desc = 0;
849 len_stat |= DMADESC_WRAP_MASK;
850 }
851 emac->tx_desc_count--;
852
853 /* the dma engine might already be polling; make sure we update
854 * the desc fields in the correct order */
855 wmb();
856 desc->len_stat = len_stat;
857 wmb();
858
859 /* kick tx dma */
860 dmac_writel(iudma, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG,
861 emac->tx_chan);
862
863 /* stop queue if no more desc available */
864 if (!emac->tx_desc_count)
865 netif_stop_queue(ndev);
866
867 ndev->stats.tx_bytes += skb->len;
868 ndev->stats.tx_packets++;
869 ret = NETDEV_TX_OK;
870
871 out_unlock:
872 spin_unlock(&emac->tx_lock);
873 return ret;
874 }
875
876 /*
877 * Change the interface's emac address.
878 */
879 static int bcm6348_emac_set_mac_address(struct net_device *ndev, void *p)
880 {
881 struct bcm6348_emac *emac = netdev_priv(ndev);
882 struct sockaddr *addr = p;
883 u32 val;
884
885 eth_hw_addr_set(ndev, addr->sa_data);
886
887 /* use perfect match register 0 to store my emac address */
888 val = (ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
889 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5];
890 emac_writel(emac, val, ENET_PML_REG(0));
891
892 val = (ndev->dev_addr[0] << 8 | ndev->dev_addr[1]);
893 val |= ENET_PMH_DATAVALID_MASK;
894 emac_writel(emac, val, ENET_PMH_REG(0));
895
896 return 0;
897 }
898
899 /*
900 * Change rx mode (promiscuous/allmulti) and update multicast list
901 */
902 static void bcm6348_emac_set_multicast_list(struct net_device *ndev)
903 {
904 struct bcm6348_emac *emac = netdev_priv(ndev);
905 struct netdev_hw_addr *ha;
906 u32 val;
907 unsigned int i;
908
909 val = emac_readl(emac, ENET_RXCFG_REG);
910
911 if (ndev->flags & IFF_PROMISC)
912 val |= ENET_RXCFG_PROMISC_MASK;
913 else
914 val &= ~ENET_RXCFG_PROMISC_MASK;
915
916 /* only 3 perfect match registers left, first one is used for
917 * own mac address */
918 if ((ndev->flags & IFF_ALLMULTI) || netdev_mc_count(ndev) > 3)
919 val |= ENET_RXCFG_ALLMCAST_MASK;
920 else
921 val &= ~ENET_RXCFG_ALLMCAST_MASK;
922
923 /* no need to set perfect match registers if we catch all
924 * multicast */
925 if (val & ENET_RXCFG_ALLMCAST_MASK) {
926 emac_writel(emac, val, ENET_RXCFG_REG);
927 return;
928 }
929
930 i = 0;
931 netdev_for_each_mc_addr(ha, ndev) {
932 u8 *dmi_addr;
933 u32 tmp;
934
935 if (i == 3)
936 break;
937
938 /* update perfect match registers */
939 dmi_addr = ha->addr;
940 tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
941 (dmi_addr[4] << 8) | dmi_addr[5];
942 emac_writel(emac, tmp, ENET_PML_REG(i + 1));
943
944 tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
945 tmp |= ENET_PMH_DATAVALID_MASK;
946 emac_writel(emac, tmp, ENET_PMH_REG(i++ + 1));
947 }
948
949 for (; i < 3; i++) {
950 emac_writel(emac, 0, ENET_PML_REG(i + 1));
951 emac_writel(emac, 0, ENET_PMH_REG(i + 1));
952 }
953
954 emac_writel(emac, val, ENET_RXCFG_REG);
955 }
956
957 /*
958 * disable emac
959 */
960 static void bcm6348_emac_disable_mac(struct bcm6348_emac *emac)
961 {
962 int limit;
963 u32 val;
964
965 val = emac_readl(emac, ENET_CTL_REG);
966 val |= ENET_CTL_DISABLE_MASK;
967 emac_writel(emac, val, ENET_CTL_REG);
968
969 limit = 1000;
970 do {
971 val = emac_readl(emac, ENET_CTL_REG);
972 if (!(val & ENET_CTL_DISABLE_MASK))
973 break;
974 udelay(1);
975 } while (limit--);
976 }
977
978 /*
979 * set emac duplex parameters
980 */
981 static void bcm6348_emac_set_duplex(struct bcm6348_emac *emac, int fullduplex)
982 {
983 u32 val;
984
985 val = emac_readl(emac, ENET_TXCTL_REG);
986 if (fullduplex)
987 val |= ENET_TXCTL_FD_MASK;
988 else
989 val &= ~ENET_TXCTL_FD_MASK;
990 emac_writel(emac, val, ENET_TXCTL_REG);
991 }
992
993 /*
994 * set emac flow control parameters
995 */
996 static void bcm6348_emac_set_flow(struct bcm6348_emac *emac, bool rx_en, bool tx_en)
997 {
998 struct bcm6348_iudma *iudma = emac->iudma;
999 u32 val;
1000
1001 val = emac_readl(emac, ENET_RXCFG_REG);
1002 if (rx_en)
1003 val |= ENET_RXCFG_ENFLOW_MASK;
1004 else
1005 val &= ~ENET_RXCFG_ENFLOW_MASK;
1006 emac_writel(emac, val, ENET_RXCFG_REG);
1007
1008 dmas_writel(iudma, emac->rx_desc_dma, DMAS_RSTART_REG, emac->rx_chan);
1009 dmas_writel(iudma, emac->tx_desc_dma, DMAS_RSTART_REG, emac->tx_chan);
1010
1011 val = dma_readl(iudma, DMA_CFG_REG);
1012 if (tx_en)
1013 val |= DMA_CFG_FLOWCH_MASK(emac->rx_chan);
1014 else
1015 val &= ~DMA_CFG_FLOWCH_MASK(emac->rx_chan);
1016 dma_writel(iudma, val, DMA_CFG_REG);
1017 }
1018
1019 /*
1020 * adjust emac phy
1021 */
1022 static void bcm6348_emac_adjust_phy(struct net_device *ndev)
1023 {
1024 struct phy_device *phydev = ndev->phydev;
1025 struct bcm6348_emac *emac = netdev_priv(ndev);
1026 struct platform_device *pdev = emac->pdev;
1027 struct device *dev = &pdev->dev;
1028 bool status_changed = false;
1029
1030 if (emac->old_link != phydev->link) {
1031 status_changed = true;
1032 emac->old_link = phydev->link;
1033 }
1034
1035 if (phydev->link && phydev->duplex != emac->old_duplex) {
1036 bcm6348_emac_set_duplex(emac, phydev->duplex == DUPLEX_FULL);
1037 status_changed = true;
1038 emac->old_duplex = phydev->duplex;
1039 }
1040
1041 if (phydev->link && phydev->pause != emac->old_pause) {
1042 bool rx_pause_en, tx_pause_en;
1043
1044 if (phydev->pause) {
1045 rx_pause_en = true;
1046 tx_pause_en = true;
1047 } else {
1048 rx_pause_en = false;
1049 tx_pause_en = false;
1050 }
1051
1052 bcm6348_emac_set_flow(emac, rx_pause_en, tx_pause_en);
1053 status_changed = true;
1054 emac->old_pause = phydev->pause;
1055 }
1056
1057 if (status_changed)
1058 dev_info(dev, "%s: phy link %s %s/%s/%s/%s\n",
1059 ndev->name,
1060 phydev->link ? "UP" : "DOWN",
1061 phy_modes(phydev->interface),
1062 phy_speed_to_str(phydev->speed),
1063 phy_duplex_to_str(phydev->duplex),
1064 phydev->pause ? "rx/tx" : "off");
1065 }
1066
1067
1068 static int bcm6348_emac_open(struct net_device *ndev)
1069 {
1070 struct bcm6348_emac *emac = netdev_priv(ndev);
1071 struct bcm6348_iudma *iudma = emac->iudma;
1072 struct platform_device *pdev = emac->pdev;
1073 struct device *dev = &pdev->dev;
1074 struct sockaddr addr;
1075 unsigned int i, size;
1076 int ret;
1077 void *p;
1078 u32 val;
1079
1080 /* mask all interrupts and request them */
1081 emac_writel(emac, 0, ENET_IRMASK_REG);
1082 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan);
1083 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan);
1084
1085 ret = request_irq(ndev->irq, bcm6348_emac_isr_mac, 0, ndev->name,
1086 ndev);
1087 if (ret)
1088 return ret;
1089
1090 ret = request_irq(emac->irq_rx, bcm6348_emac_isr_dma,
1091 0, ndev->name, ndev);
1092 if (ret)
1093 goto out_freeirq;
1094
1095 ret = request_irq(emac->irq_tx, bcm6348_emac_isr_dma,
1096 0, ndev->name, ndev);
1097 if (ret)
1098 goto out_freeirq_rx;
1099
1100 /* initialize perfect match registers */
1101 for (i = 0; i < 4; i++) {
1102 emac_writel(emac, 0, ENET_PML_REG(i));
1103 emac_writel(emac, 0, ENET_PMH_REG(i));
1104 }
1105
1106 /* write device mac address */
1107 memcpy(addr.sa_data, ndev->dev_addr, ETH_ALEN);
1108 bcm6348_emac_set_mac_address(ndev, &addr);
1109
1110 /* allocate rx dma ring */
1111 size = emac->rx_ring_size * sizeof(struct bcm6348_iudma_desc);
1112 p = dma_alloc_coherent(dev, size, &emac->rx_desc_dma, GFP_KERNEL);
1113 if (!p) {
1114 dev_err(dev, "cannot allocate rx ring %u\n", size);
1115 ret = -ENOMEM;
1116 goto out_freeirq_tx;
1117 }
1118
1119 memset(p, 0, size);
1120 emac->rx_desc_alloc_size = size;
1121 emac->rx_desc_cpu = p;
1122
1123 /* allocate tx dma ring */
1124 size = emac->tx_ring_size * sizeof(struct bcm6348_iudma_desc);
1125 p = dma_alloc_coherent(dev, size, &emac->tx_desc_dma, GFP_KERNEL);
1126 if (!p) {
1127 dev_err(dev, "cannot allocate tx ring\n");
1128 ret = -ENOMEM;
1129 goto out_free_rx_ring;
1130 }
1131
1132 memset(p, 0, size);
1133 emac->tx_desc_alloc_size = size;
1134 emac->tx_desc_cpu = p;
1135
1136 emac->tx_skb = kzalloc(sizeof(struct sk_buff *) * emac->tx_ring_size,
1137 GFP_KERNEL);
1138 if (!emac->tx_skb) {
1139 dev_err(dev, "cannot allocate tx skb queue\n");
1140 ret = -ENOMEM;
1141 goto out_free_tx_ring;
1142 }
1143
1144 emac->tx_desc_count = emac->tx_ring_size;
1145 emac->tx_dirty_desc = 0;
1146 emac->tx_curr_desc = 0;
1147 spin_lock_init(&emac->tx_lock);
1148
1149 /* init & fill rx ring with skbs */
1150 emac->rx_skb = kzalloc(sizeof(struct sk_buff *) * emac->rx_ring_size,
1151 GFP_KERNEL);
1152 if (!emac->rx_skb) {
1153 dev_err(dev, "cannot allocate rx skb queue\n");
1154 ret = -ENOMEM;
1155 goto out_free_tx_skb;
1156 }
1157
1158 emac->rx_desc_count = 0;
1159 emac->rx_dirty_desc = 0;
1160 emac->rx_curr_desc = 0;
1161
1162 /* initialize flow control buffer allocation */
1163 dma_writel(iudma, DMA_BUFALLOC_FORCE_MASK | 0,
1164 DMA_BUFALLOC_REG(emac->rx_chan));
1165
1166 if (bcm6348_emac_refill_rx(ndev)) {
1167 dev_err(dev, "cannot allocate rx skb queue\n");
1168 ret = -ENOMEM;
1169 goto out;
1170 }
1171
1172 /* write rx & tx ring addresses */
1173 dmas_writel(iudma, emac->rx_desc_dma,
1174 DMAS_RSTART_REG, emac->rx_chan);
1175 dmas_writel(iudma, emac->tx_desc_dma,
1176 DMAS_RSTART_REG, emac->tx_chan);
1177
1178 /* clear remaining state ram for rx & tx channel */
1179 dmas_writel(iudma, 0, DMAS_SRAM2_REG, emac->rx_chan);
1180 dmas_writel(iudma, 0, DMAS_SRAM2_REG, emac->tx_chan);
1181 dmas_writel(iudma, 0, DMAS_SRAM3_REG, emac->rx_chan);
1182 dmas_writel(iudma, 0, DMAS_SRAM3_REG, emac->tx_chan);
1183 dmas_writel(iudma, 0, DMAS_SRAM4_REG, emac->rx_chan);
1184 dmas_writel(iudma, 0, DMAS_SRAM4_REG, emac->tx_chan);
1185
1186 /* set max rx/tx length */
1187 emac_writel(emac, ndev->mtu, ENET_RXMAXLEN_REG);
1188 emac_writel(emac, ndev->mtu, ENET_TXMAXLEN_REG);
1189
1190 /* set dma maximum burst len */
1191 dmac_writel(iudma, ENET_DMA_MAXBURST,
1192 DMAC_MAXBURST_REG, emac->rx_chan);
1193 dmac_writel(iudma, ENET_DMA_MAXBURST,
1194 DMAC_MAXBURST_REG, emac->tx_chan);
1195
1196 /* set correct transmit fifo watermark */
1197 emac_writel(emac, ENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1198
1199 /* set flow control low/high threshold to 1/3 / 2/3 */
1200 val = emac->rx_ring_size / 3;
1201 dma_writel(iudma, val, DMA_FLOWCL_REG(emac->rx_chan));
1202 val = (emac->rx_ring_size * 2) / 3;
1203 dma_writel(iudma, val, DMA_FLOWCH_REG(emac->rx_chan));
1204
1205 /* all set, enable emac and interrupts, start dma engine and
1206 * kick rx dma channel
1207 */
1208 wmb();
1209 val = emac_readl(emac, ENET_CTL_REG);
1210 val |= ENET_CTL_ENABLE_MASK;
1211 emac_writel(emac, val, ENET_CTL_REG);
1212 dmac_writel(iudma, DMAC_CHANCFG_EN_MASK,
1213 DMAC_CHANCFG_REG, emac->rx_chan);
1214
1215 /* watch "mib counters about to overflow" interrupt */
1216 emac_writel(emac, ENET_IR_MIB, ENET_IR_REG);
1217 emac_writel(emac, ENET_IR_MIB, ENET_IRMASK_REG);
1218
1219 /* watch "packet transferred" interrupt in rx and tx */
1220 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
1221 DMAC_IR_REG, emac->rx_chan);
1222 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
1223 DMAC_IR_REG, emac->tx_chan);
1224
1225 /* make sure we enable napi before rx interrupt */
1226 napi_enable(&emac->napi);
1227
1228 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
1229 DMAC_IRMASK_REG, emac->rx_chan);
1230 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
1231 DMAC_IRMASK_REG, emac->tx_chan);
1232
1233 if (ndev->phydev)
1234 phy_start(ndev->phydev);
1235
1236 netif_carrier_on(ndev);
1237 netif_start_queue(ndev);
1238
1239 return 0;
1240
1241 out:
1242 for (i = 0; i < emac->rx_ring_size; i++) {
1243 struct bcm6348_iudma_desc *desc;
1244
1245 if (!emac->rx_skb[i])
1246 continue;
1247
1248 desc = &emac->rx_desc_cpu[i];
1249 dma_unmap_single(dev, desc->address, emac->rx_skb_size,
1250 DMA_FROM_DEVICE);
1251 kfree_skb(emac->rx_skb[i]);
1252 }
1253 kfree(emac->rx_skb);
1254
1255 out_free_tx_skb:
1256 kfree(emac->tx_skb);
1257
1258 out_free_tx_ring:
1259 dma_free_coherent(dev, emac->tx_desc_alloc_size,
1260 emac->tx_desc_cpu, emac->tx_desc_dma);
1261
1262 out_free_rx_ring:
1263 dma_free_coherent(dev, emac->rx_desc_alloc_size,
1264 emac->rx_desc_cpu, emac->rx_desc_dma);
1265
1266 out_freeirq_tx:
1267 if (emac->irq_tx != -1)
1268 free_irq(emac->irq_tx, ndev);
1269
1270 out_freeirq_rx:
1271 free_irq(emac->irq_rx, ndev);
1272
1273 out_freeirq:
1274 if (ndev->phydev)
1275 phy_disconnect(ndev->phydev);
1276
1277 return ret;
1278 }
1279
1280 static int bcm6348_emac_stop(struct net_device *ndev)
1281 {
1282 struct bcm6348_emac *emac = netdev_priv(ndev);
1283 struct bcm6348_iudma *iudma = emac->iudma;
1284 struct device *dev = &emac->pdev->dev;
1285 unsigned int i;
1286
1287 netif_stop_queue(ndev);
1288 napi_disable(&emac->napi);
1289 if (ndev->phydev)
1290 phy_stop(ndev->phydev);
1291 del_timer_sync(&emac->rx_timeout);
1292
1293 /* mask all interrupts */
1294 emac_writel(emac, 0, ENET_IRMASK_REG);
1295 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan);
1296 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan);
1297
1298 /* disable dma & emac */
1299 bcm6348_iudma_chan_stop(iudma, emac->tx_chan);
1300 bcm6348_iudma_chan_stop(iudma, emac->rx_chan);
1301 bcm6348_emac_disable_mac(emac);
1302
1303 /* force reclaim of all tx buffers */
1304 bcm6348_emac_tx_reclaim(ndev, 1);
1305
1306 /* free the rx skb ring */
1307 for (i = 0; i < emac->rx_ring_size; i++) {
1308 struct bcm6348_iudma_desc *desc;
1309
1310 if (!emac->rx_skb[i])
1311 continue;
1312
1313 desc = &emac->rx_desc_cpu[i];
1314 dma_unmap_single_attrs(dev, desc->address, emac->rx_skb_size,
1315 DMA_FROM_DEVICE,
1316 DMA_ATTR_SKIP_CPU_SYNC);
1317 kfree_skb(emac->rx_skb[i]);
1318 }
1319
1320 /* free remaining allocated memory */
1321 kfree(emac->rx_skb);
1322 kfree(emac->tx_skb);
1323 dma_free_coherent(dev, emac->rx_desc_alloc_size, emac->rx_desc_cpu,
1324 emac->rx_desc_dma);
1325 dma_free_coherent(dev, emac->tx_desc_alloc_size, emac->tx_desc_cpu,
1326 emac->tx_desc_dma);
1327 free_irq(emac->irq_tx, ndev);
1328 free_irq(emac->irq_rx, ndev);
1329 free_irq(ndev->irq, ndev);
1330
1331 netdev_reset_queue(ndev);
1332
1333 return 0;
1334 }
1335
1336 static const struct net_device_ops bcm6348_emac_ops = {
1337 .ndo_open = bcm6348_emac_open,
1338 .ndo_stop = bcm6348_emac_stop,
1339 .ndo_start_xmit = bcm6348_emac_start_xmit,
1340 .ndo_set_mac_address = bcm6348_emac_set_mac_address,
1341 .ndo_set_rx_mode = bcm6348_emac_set_multicast_list,
1342 };
1343
1344 static int bcm6348_emac_mdio_op(struct bcm6348_emac *emac, uint32_t data)
1345 {
1346 int limit;
1347
1348 /* Make sure mii interrupt status is cleared */
1349 emac_writel(emac, ENET_IR_MII, ENET_IR_REG);
1350
1351 /* Issue mii op */
1352 emac_writel(emac, data, ENET_MIID_REG);
1353 wmb();
1354
1355 /* busy wait on mii interrupt bit, with timeout */
1356 limit = 1000;
1357 do {
1358 if (emac_readl(emac, ENET_IR_REG) & ENET_IR_MII)
1359 break;
1360 udelay(1);
1361 } while (limit-- > 0);
1362
1363 return (limit < 0) ? 1 : 0;
1364 }
1365
1366 static int bcm6348_emac_mdio_read(struct mii_bus *bus, int phy_id, int loc)
1367 {
1368 struct bcm6348_emac *emac = bus->priv;
1369 struct platform_device *pdev = emac->pdev;
1370 struct device *dev = &pdev->dev;
1371 uint32_t reg;
1372
1373 reg = 0x2 << ENET_MIID_TA_SHIFT;
1374 reg |= loc << ENET_MIID_REG_SHIFT;
1375 reg |= phy_id << ENET_MIID_PHY_SHIFT;
1376 reg |= ENET_MIID_OP_READ;
1377
1378 if (bcm6348_emac_mdio_op(emac, reg)) {
1379 dev_err(dev, "mdio_read: phy=%d loc=%x timeout!\n",
1380 phy_id, loc);
1381 return -EINVAL;
1382 }
1383
1384 reg = emac_readl(emac, ENET_MIID_REG);
1385 reg = (reg >> ENET_MIID_DATA_SHIFT) & ENET_MIID_DATA_MASK;
1386
1387 return (int) reg;
1388 }
1389
1390 static int bcm6348_emac_mdio_write(struct mii_bus *bus, int phy_id,
1391 int loc, uint16_t val)
1392 {
1393 struct bcm6348_emac *emac = bus->priv;
1394 struct platform_device *pdev = emac->pdev;
1395 struct device *dev = &pdev->dev;
1396 uint32_t reg;
1397
1398 reg = (val << ENET_MIID_DATA_SHIFT) & ENET_MIID_DATA_MASK;
1399 reg |= 0x2 << ENET_MIID_TA_SHIFT;
1400 reg |= loc << ENET_MIID_REG_SHIFT;
1401 reg |= phy_id << ENET_MIID_PHY_SHIFT;
1402 reg |= ENET_MIID_OP_WRITE;
1403
1404 if (bcm6348_emac_mdio_op(emac, reg)) {
1405 dev_err(dev, "mdio_write: phy=%d loc=%x timeout!\n",
1406 phy_id, loc);
1407 return -EINVAL;
1408 }
1409
1410 bcm6348_emac_mdio_op(emac, reg);
1411
1412 return 0;
1413 }
1414
1415 static int bcm6348_emac_mdio_init(struct bcm6348_emac *emac,
1416 struct device_node *np)
1417 {
1418 struct platform_device *pdev = emac->pdev;
1419 struct device *dev = &pdev->dev;
1420 struct device_node *mnp;
1421 struct mii_bus *mii_bus;
1422 int ret;
1423
1424 mnp = of_get_child_by_name(np, "mdio");
1425 if (!mnp)
1426 return -ENODEV;
1427
1428 mii_bus = devm_mdiobus_alloc(dev);
1429 if (!mii_bus) {
1430 of_node_put(mnp);
1431 return -ENOMEM;
1432 }
1433
1434 mii_bus->priv = emac;
1435 mii_bus->name = np->full_name;
1436 snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
1437 mii_bus->parent = dev;
1438 mii_bus->read = bcm6348_emac_mdio_read;
1439 mii_bus->write = bcm6348_emac_mdio_write;
1440 mii_bus->phy_mask = 0x3f;
1441
1442 ret = devm_of_mdiobus_register(dev, mii_bus, mnp);
1443 of_node_put(mnp);
1444 if (ret) {
1445 dev_err(dev, "MDIO bus registration failed\n");
1446 return ret;
1447 }
1448
1449 dev_info(dev, "MDIO bus init\n");
1450
1451 return 0;
1452 }
1453
1454 /*
1455 * preinit hardware to allow mii operation while device is down
1456 */
1457 static void bcm6348_emac_hw_preinit(struct bcm6348_emac *emac)
1458 {
1459 u32 val;
1460 int limit;
1461
1462 /* make sure emac is disabled */
1463 bcm6348_emac_disable_mac(emac);
1464
1465 /* soft reset emac */
1466 val = ENET_CTL_SRESET_MASK;
1467 emac_writel(emac, val, ENET_CTL_REG);
1468 wmb();
1469
1470 limit = 1000;
1471 do {
1472 val = emac_readl(emac, ENET_CTL_REG);
1473 if (!(val & ENET_CTL_SRESET_MASK))
1474 break;
1475 udelay(1);
1476 } while (limit--);
1477
1478 /* select correct mii interface */
1479 val = emac_readl(emac, ENET_CTL_REG);
1480 if (emac->ext_mii)
1481 val |= ENET_CTL_EPHYSEL_MASK;
1482 else
1483 val &= ~ENET_CTL_EPHYSEL_MASK;
1484 emac_writel(emac, val, ENET_CTL_REG);
1485
1486 /* turn on mdc clock */
1487 emac_writel(emac, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1488 ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1489
1490 /* set mib counters to self-clear when read */
1491 val = emac_readl(emac, ENET_MIBCTL_REG);
1492 val |= ENET_MIBCTL_RDCLEAR_MASK;
1493 emac_writel(emac, val, ENET_MIBCTL_REG);
1494 }
1495
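/*
 * Illustrative device tree fragment for an EMAC node, inferred from what
 * this probe reads (a "brcm,iudma" phandle, interrupts named "emac", "rx"
 * and "tx", "dma-rx"/"dma-tx" channel numbers, an optional
 * "brcm,external-mii" flag, optional clocks/resets, a MAC address and an
 * "mdio" child node). Addresses, interrupt numbers and channel numbers are
 * placeholders:
 *
 *	emac0: ethernet@fffe6000 {
 *		compatible = "brcm,bcm6348-emac";
 *		reg = <0xfffe6000 0x2dc>;
 *		interrupts = <8>, <20>, <22>;
 *		interrupt-names = "emac", "rx", "tx";
 *		brcm,iudma = <&iudma>;
 *		dma-rx = <0>;
 *		dma-tx = <1>;
 *
 *		mdio {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *		};
 *	};
 */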
1496 static int bcm6348_emac_probe(struct platform_device *pdev)
1497 {
1498 struct device *dev = &pdev->dev;
1499 struct device_node *node = dev->of_node;
1500 struct device_node *dma_node;
1501 struct platform_device *dma_pdev;
1502 struct bcm6348_emac *emac;
1503 struct bcm6348_iudma *iudma;
1504 struct net_device *ndev;
1505 unsigned i;
1506 int num_resets;
1507 int ret;
1508
1509 dma_node = of_parse_phandle(node, "brcm,iudma", 0);
1510 if (!dma_node)
1511 return -EINVAL;
1512
1513 dma_pdev = of_find_device_by_node(dma_node);
1514 of_node_put(dma_node);
1515 if (!dma_pdev)
1516 return -EINVAL;
1517
1518 iudma = platform_get_drvdata(dma_pdev);
1519 if (!iudma)
1520 return -EPROBE_DEFER;
1521
1522 ndev = devm_alloc_etherdev(dev, sizeof(*emac));
1523 if (!ndev)
1524 return -ENOMEM;
1525
1526 platform_set_drvdata(pdev, ndev);
1527 SET_NETDEV_DEV(ndev, dev);
1528
1529 emac = netdev_priv(ndev);
1530 emac->iudma = iudma;
1531 emac->pdev = pdev;
1532 emac->net_dev = ndev;
1533
1534 emac->base = devm_platform_ioremap_resource(pdev, 0);
1535 if (IS_ERR_OR_NULL(emac->base))
1536 return PTR_ERR(emac->base);
1537
1538 ndev->irq = of_irq_get_byname(node, "emac");
1539 if (!ndev->irq)
1540 return -ENODEV;
1541
1542 emac->irq_rx = of_irq_get_byname(node, "rx");
1543 if (!emac->irq_rx)
1544 return -ENODEV;
1545
1546 emac->irq_tx = of_irq_get_byname(node, "tx");
1547 if (!emac->irq_tx)
1548 return -ENODEV;
1549
1550 if (of_property_read_u32(node, "dma-rx", &emac->rx_chan))
1551 return -ENODEV;
1552
1553 if (of_property_read_u32(node, "dma-tx", &emac->tx_chan))
1554 return -ENODEV;
1555
1556 emac->ext_mii = of_property_read_bool(node, "brcm,external-mii");
1557
1558 emac->rx_ring_size = ENET_DEF_RX_DESC;
1559 emac->tx_ring_size = ENET_DEF_TX_DESC;
1560 emac->copybreak = ENET_DEF_CPY_BREAK;
1561
1562 emac->old_link = 0;
1563 emac->old_duplex = -1;
1564 emac->old_pause = -1;
1565
1566 of_get_mac_address(node, ndev->dev_addr);
1567 if (is_valid_ether_addr(ndev->dev_addr)) {
1568 dev_info(dev, "dt mac %pM\n", ndev->dev_addr);
1569 } else {
1570 random_ether_addr(ndev->dev_addr);
1571 dev_info(dev, "random mac %pM\n", ndev->dev_addr);
1572 }
1573
1574 emac->rx_skb_size = ALIGN(ndev->mtu + ENET_MTU_OVERHEAD,
1575 ENET_DMA_MAXBURST * 4);
1576
1577 emac->num_clocks = of_clk_get_parent_count(node);
1578 if (emac->num_clocks) {
1579 emac->clock = devm_kcalloc(dev, emac->num_clocks,
1580 sizeof(struct clk *), GFP_KERNEL);
1581 if (IS_ERR_OR_NULL(emac->clock))
1582 return PTR_ERR(emac->clock);
1583 }
1584 for (i = 0; i < emac->num_clocks; i++) {
1585 emac->clock[i] = of_clk_get(node, i);
1586 if (IS_ERR_OR_NULL(emac->clock[i])) {
1587 dev_err(dev, "error getting emac clock %d\n", i);
1588 return PTR_ERR(emac->clock[i]);
1589 }
1590
1591 ret = clk_prepare_enable(emac->clock[i]);
1592 if (ret) {
1593 dev_err(dev, "error enabling emac clock %d\n", i);
1594 return ret;
1595 }
1596 }
1597
1598 num_resets = of_count_phandle_with_args(node, "resets",
1599 "#reset-cells");
1600 if (num_resets > 0)
1601 emac->num_resets = num_resets;
1602 else
1603 emac->num_resets = 0;
1604 if (emac->num_resets) {
1605 emac->reset = devm_kcalloc(dev, emac->num_resets,
1606 sizeof(struct reset_control *),
1607 GFP_KERNEL);
1608 if (IS_ERR_OR_NULL(emac->reset))
1609 return PTR_ERR(emac->reset);
1610
1611 }
1612 for (i = 0; i < emac->num_resets; i++) {
1613 emac->reset[i] = devm_reset_control_get_by_index(dev, i);
1614 if (IS_ERR_OR_NULL(emac->reset[i])) {
1615 dev_err(dev, "error getting emac reset %d\n", i);
1616 return PTR_ERR(emac->reset[i]);
1617 }
1618
1619 ret = reset_control_reset(emac->reset[i]);
1620 if (ret) {
1621 dev_err(dev, "error performing emac reset %d\n", i);
1622 return ret;
1623 }
1624 }
1625
1626 /* do minimal hardware init to be able to probe mii bus */
1627 bcm6348_emac_hw_preinit(emac);
1628
1629 ret = bcm6348_emac_mdio_init(emac, node);
1630 if (ret)
1631 return ret;
1632
1633 spin_lock_init(&emac->rx_lock);
1634
1635 timer_setup(&emac->rx_timeout, bcm6348_emac_refill_rx_timer, 0);
1636
1637 /* zero mib counters */
1638 for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1639 emac_writel(emac, 0, ENET_MIB_REG(i));
1640
1641 /* register netdevice */
1642 ndev->netdev_ops = &bcm6348_emac_ops;
1643 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
1644 ndev->mtu = ETH_DATA_LEN - VLAN_ETH_HLEN;
1645 ndev->max_mtu = ENET_MAX_MTU - VLAN_ETH_HLEN;
1646 netif_napi_add(ndev, &emac->napi, bcm6348_emac_poll, 16);
1647 SET_NETDEV_DEV(ndev, dev);
1648
1649 ret = devm_register_netdev(dev, ndev);
1650 if (ret)
1651 goto out_disable_clk;
1652
1653 netif_carrier_off(ndev);
1654
1655 ndev->phydev = of_phy_get_and_connect(ndev, node,
1656 bcm6348_emac_adjust_phy);
1657 if (IS_ERR_OR_NULL(ndev->phydev))
1658 dev_warn(dev, "PHY not found!\n");
1659
1660 dev_info(dev, "%s at 0x%px, IRQ %d\n", ndev->name, emac->base,
1661 ndev->irq);
1662
1663 return 0;
1664
1665 out_disable_clk:
1666 for (i = 0; i < emac->num_resets; i++)
1667 reset_control_assert(emac->reset[i]);
1668
1669 for (i = 0; i < emac->num_clocks; i++)
1670 clk_disable_unprepare(emac->clock[i]);
1671
1672 return ret;
1673 }
1674
1675 static int bcm6348_emac_remove(struct platform_device *pdev)
1676 {
1677 struct net_device *ndev = platform_get_drvdata(pdev);
1678 struct bcm6348_emac *emac = netdev_priv(ndev);
1679 unsigned int i;
1680
1681 emac_writel(emac, 0, ENET_MIISC_REG);
1682
1683 for (i = 0; i < emac->num_resets; i++)
1684 reset_control_assert(emac->reset[i]);
1685
1686 for (i = 0; i < emac->num_clocks; i++)
1687 clk_disable_unprepare(emac->clock[i]);
1688
1689 return 0;
1690 }
1691
1692 static const struct of_device_id bcm6348_emac_of_match[] = {
1693 { .compatible = "brcm,bcm6338-emac", },
1694 { .compatible = "brcm,bcm6348-emac", },
1695 { .compatible = "brcm,bcm6358-emac", },
1696 { /* sentinel */ },
1697 };
1698 MODULE_DEVICE_TABLE(of, bcm6348_emac_of_match);
1699
1700 static struct platform_driver bcm6348_emac_driver = {
1701 .driver = {
1702 .name = "bcm6348-emac",
1703 .of_match_table = of_match_ptr(bcm6348_emac_of_match),
1704 },
1705 .probe = bcm6348_emac_probe,
1706 .remove = bcm6348_emac_remove,
1707 };
1708
1709 int bcm6348_iudma_drivers_register(struct platform_device *pdev)
1710 {
1711 struct device *dev = &pdev->dev;
1712 int ret;
1713
1714 ret = platform_driver_register(&bcm6348_emac_driver);
1715 if (ret)
1716 dev_err(dev, "error registering emac driver!\n");
1717
1718 return ret;
1719 }