// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BCM6368 Ethernet Switch Controller Driver
 *
 * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 Jonas Gorski <jonas.gorski@gmail.com>
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_clk.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#define ENETSW_TAG_SIZE		6
#define ENETSW_MTU_OVERHEAD	(VLAN_ETH_HLEN + VLAN_HLEN + \
				 ENETSW_TAG_SIZE)
#define ENETSW_FRAG_SIZE(x)	(SKB_DATA_ALIGN(NET_SKB_PAD + x + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))

/* default number of descriptors */
#define ENETSW_DEF_RX_DESC	64
#define ENETSW_DEF_TX_DESC	32
#define ENETSW_DEF_CPY_BREAK	128

/* maximum burst len for dma (in 4-byte units) */
#define ENETSW_DMA_MAXBURST	8
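
/*
 * The controller exposes three register blocks: global DMA configuration
 * (dma_base), per-channel configuration (dma_chan, one DMA_CHAN_WIDTH-sized
 * window per channel) and per-channel state RAM (dma_sram). The dma_readl,
 * dmac_writel and dmas_writel helpers below index the per-channel blocks
 * by channel id.
 */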
#define DMA_CHAN_WIDTH			0x10

/* Controller Configuration Register */
#define DMA_CFG_REG			0x0
#define DMA_CFG_EN_SHIFT		0
#define DMA_CFG_EN_MASK			(1 << DMA_CFG_EN_SHIFT)
#define DMA_CFG_FLOWCH_MASK(x)		(1 << ((x >> 1) + 1))

/* Flow Control Descriptor Low Threshold register */
#define DMA_FLOWCL_REG(x)		(0x4 + (x) * 6)

/* Flow Control Descriptor High Threshold register */
#define DMA_FLOWCH_REG(x)		(0x8 + (x) * 6)

/* Flow Control Descriptor Buffer Alloc Threshold register */
#define DMA_BUFALLOC_REG(x)		(0xc + (x) * 6)
#define DMA_BUFALLOC_FORCE_SHIFT	31
#define DMA_BUFALLOC_FORCE_MASK		(1 << DMA_BUFALLOC_FORCE_SHIFT)
/* Channel Configuration register */
#define DMAC_CHANCFG_REG		0x0
#define DMAC_CHANCFG_EN_SHIFT		0
#define DMAC_CHANCFG_EN_MASK		(1 << DMAC_CHANCFG_EN_SHIFT)
#define DMAC_CHANCFG_PKTHALT_SHIFT	1
#define DMAC_CHANCFG_PKTHALT_MASK	(1 << DMAC_CHANCFG_PKTHALT_SHIFT)
#define DMAC_CHANCFG_BUFHALT_SHIFT	2
#define DMAC_CHANCFG_BUFHALT_MASK	(1 << DMAC_CHANCFG_BUFHALT_SHIFT)
#define DMAC_CHANCFG_CHAINING_SHIFT	2
#define DMAC_CHANCFG_CHAINING_MASK	(1 << DMAC_CHANCFG_CHAINING_SHIFT)
#define DMAC_CHANCFG_WRAP_EN_SHIFT	3
#define DMAC_CHANCFG_WRAP_EN_MASK	(1 << DMAC_CHANCFG_WRAP_EN_SHIFT)
#define DMAC_CHANCFG_FLOWC_EN_SHIFT	4
#define DMAC_CHANCFG_FLOWC_EN_MASK	(1 << DMAC_CHANCFG_FLOWC_EN_SHIFT)
/* Interrupt Control/Status register */
#define DMAC_IR_REG			0x4
#define DMAC_IR_BUFDONE_MASK		(1 << 0)
#define DMAC_IR_PKTDONE_MASK		(1 << 1)
#define DMAC_IR_NOTOWNER_MASK		(1 << 2)

/* Interrupt Mask register */
#define DMAC_IRMASK_REG			0x8

/* Maximum Burst Length */
#define DMAC_MAXBURST_REG		0xc

/* Ring Start Address register */
#define DMAS_RSTART_REG			0x0

/* State Ram Word 2 */
#define DMAS_SRAM2_REG			0x4

/* State Ram Word 3 */
#define DMAS_SRAM3_REG			0x8

/* State Ram Word 4 */
#define DMAS_SRAM4_REG			0xc
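
/*
 * Hardware view of a DMA descriptor: len_stat packs the buffer length into
 * bits 16..27 (DMADESC_LENGTH_*) and control/status flags into the low
 * half; address is the bus address of the data buffer. The rings of these
 * descriptors are allocated from coherent DMA memory in the open callback.
 */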
struct bcm6368_enetsw_desc {
	u32 len_stat;
	u32 address;
};

#define DMADESC_LENGTH_SHIFT	16
#define DMADESC_LENGTH_MASK	(0xfff << DMADESC_LENGTH_SHIFT)
#define DMADESC_OWNER_MASK	(1 << 15)
#define DMADESC_EOP_MASK	(1 << 14)
#define DMADESC_SOP_MASK	(1 << 13)
#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK	(1 << 12)
#define DMADESC_USB_NOZERO_MASK	(1 << 1)
#define DMADESC_USB_ZERO_MASK	(1 << 0)

#define DMADESC_UNDER_MASK	(1 << 9)
#define DMADESC_APPEND_CRC	(1 << 8)
#define DMADESC_OVSIZE_MASK	(1 << 4)
#define DMADESC_RXER_MASK	(1 << 2)
#define DMADESC_CRC_MASK	(1 << 1)
#define DMADESC_OV_MASK		(1 << 0)
#define DMADESC_ERR_MASK	(DMADESC_UNDER_MASK | \
				 DMADESC_OVSIZE_MASK | \
				 DMADESC_RXER_MASK | \
				 DMADESC_CRC_MASK | \
				 DMADESC_OV_MASK)
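
/*
 * Driver private state: register mappings, PM domain, clock and reset
 * handles, and the rx/tx ring bookkeeping shared between the NAPI poll
 * loop, the xmit path and the refill timer.
 */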
struct bcm6368_enetsw {
	void __iomem *dma_base;
	void __iomem *dma_chan;
	void __iomem *dma_sram;

	struct device **pm;
	struct device_link **link_pm;
	int num_pms;

	struct clk **clock;
	unsigned int num_clocks;

	struct reset_control **reset;
	unsigned int num_resets;

	int copybreak;

	int irq_rx;
	int irq_tx;

	/* hw view of rx & tx dma ring */
	dma_addr_t rx_desc_dma;
	dma_addr_t tx_desc_dma;

	/* allocated size (in bytes) for rx & tx dma ring */
	unsigned int rx_desc_alloc_size;
	unsigned int tx_desc_alloc_size;

	struct napi_struct napi;

	/* dma channel id for rx */
	u32 rx_chan;

	/* number of dma desc in rx ring */
	int rx_ring_size;

	/* cpu view of rx dma ring */
	struct bcm6368_enetsw_desc *rx_desc_cpu;

	/* current number of armed descriptors given to hardware for rx */
	int rx_desc_count;

	/* next rx descriptor to fetch from hardware */
	int rx_curr_desc;

	/* next dirty rx descriptor to refill */
	int rx_dirty_desc;

	/* size of allocated rx buffer */
	unsigned int rx_buf_size;

	/* size of allocated rx frag */
	unsigned int rx_frag_size;

	/* list of buffers given to hw for rx */
	unsigned char **rx_buf;

	/* used when rx buffer allocation failed, so we defer rx queue
	 * refill */
	struct timer_list rx_timeout;

	/* lock rx_timeout against rx normal operation */
	spinlock_t rx_lock;

	/* dma channel id for tx */
	u32 tx_chan;

	/* number of dma desc in tx ring */
	int tx_ring_size;

	/* maximum dma burst size */
	int dma_maxburst;

	/* cpu view of tx dma ring */
	struct bcm6368_enetsw_desc *tx_desc_cpu;

	/* number of available descriptors for tx */
	int tx_desc_count;

	/* next tx descriptor available */
	int tx_curr_desc;

	/* next dirty tx descriptor to reclaim */
	int tx_dirty_desc;

	/* list of skbs given to hw for tx */
	struct sk_buff **tx_skb;

	/* lock used by tx reclaim and xmit */
	spinlock_t tx_lock;

	/* network device reference */
	struct net_device *net_dev;

	/* platform device reference */
	struct platform_device *pdev;

	/* dma channel enable mask */
	u32 dma_chan_en_mask;

	/* dma channel interrupt mask */
	u32 dma_chan_int_mask;

	/* dma channel width */
	unsigned int dma_chan_width;
};
static inline void dma_writel(struct bcm6368_enetsw *priv, u32 val, u32 off)
{
	__raw_writel(val, priv->dma_base + off);
}
static inline u32 dma_readl(struct bcm6368_enetsw *priv, u32 off, int chan)
{
	return __raw_readl(priv->dma_chan + off + chan * priv->dma_chan_width);
}
static inline void dmac_writel(struct bcm6368_enetsw *priv, u32 val,
			       u32 off, int chan)
{
	__raw_writel(val, priv->dma_chan + off + chan * priv->dma_chan_width);
}
static inline void dmas_writel(struct bcm6368_enetsw *priv, u32 val,
			       u32 off, int chan)
{
	__raw_writel(val, priv->dma_sram + off + chan * priv->dma_chan_width);
}
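
/*
 * refill rx queue: give every empty ring slot a freshly mapped page frag
 * and flip descriptor ownership back to the hardware. Runs from NAPI
 * context (napi_mode == true) and from the deferred refill timer.
 */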
static int bcm6368_enetsw_refill_rx(struct net_device *dev, bool napi_mode)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm6368_enetsw_desc *desc;
		unsigned int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_buf[desc_idx]) {
			unsigned char *buf;

			if (likely(napi_mode))
				buf = napi_alloc_frag(priv->rx_frag_size);
			else
				buf = netdev_alloc_frag(priv->rx_frag_size);

			if (unlikely(!buf))
				break;

			priv->rx_buf[desc_idx] = buf;
			desc->address = dma_map_single(&priv->pdev->dev,
						       buf + NET_SKB_PAD,
						       priv->rx_buf_size,
						       DMA_FROM_DEVICE);
		}

		len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}

		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		dma_writel(priv, 1, DMA_BUFALLOC_REG(priv->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time.
	 */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}
/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm6368_enetsw_refill_rx_timer(struct timer_list *t)
{
	struct bcm6368_enetsw *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm6368_enetsw_refill_rx(dev, false);
	spin_unlock(&priv->rx_lock);
}
/*
 * extract packets from rx queue
 */
static int bcm6368_enetsw_receive_queue(struct net_device *dev, int budget)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct list_head rx_list;
	struct sk_buff *skb;
	int processed = 0;

	INIT_LIST_HEAD(&rx_list);

	/* don't scan ring further than number of refilled
	 * descriptors
	 */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm6368_enetsw_desc *desc;
		unsigned int desc_idx;
		unsigned int frag_size;
		unsigned char *buf;
		unsigned int len;
		u32 len_stat;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop
		 */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it
		 */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			dev->stats.rx_dropped++;
			continue;
		}

		buf = priv->rx_buf[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK)
		      >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= ETH_FCS_LEN;

		if (len < priv->copybreak) {
			unsigned int nfrag_size = ENETSW_FRAG_SIZE(len);
			unsigned char *nbuf = napi_alloc_frag(nfrag_size);

			if (unlikely(!nbuf)) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nbuf + NET_SKB_PAD, buf + NET_SKB_PAD, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			buf = nbuf;
			frag_size = nfrag_size;
		} else {
			dma_unmap_single(kdev, desc->address,
					 priv->rx_buf_size, DMA_FROM_DEVICE);
			priv->rx_buf[desc_idx] = NULL;
			frag_size = priv->rx_frag_size;
		}

		skb = napi_build_skb(buf, frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(buf);
			dev->stats.rx_dropped++;
			continue;
		}

		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		list_add_tail(&skb->list, &rx_list);
	} while (processed < budget);

	list_for_each_entry(skb, &rx_list, list)
		skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb_list(&rx_list);
	priv->rx_desc_count -= processed;

	if (processed || !priv->rx_desc_count) {
		bcm6368_enetsw_refill_rx(dev, true);

		/* kick rx dma */
		dmac_writel(priv, priv->dma_chan_en_mask,
			    DMAC_CHANCFG_REG, priv->rx_chan);
	}

	return processed;
}
/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	unsigned int bytes = 0;
	int released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm6368_enetsw_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled
		 */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership
		 */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		bytes += skb->len;
		napi_consume_skb(skb, !force);
		released++;
	}

	netdev_completed_queue(dev, released, bytes);

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}
/*
 * poll func, called by network core
 */
static int bcm6368_enetsw_poll(struct napi_struct *napi, int budget)
{
	struct bcm6368_enetsw *priv = container_of(napi,
		struct bcm6368_enetsw, napi);
	struct net_device *dev = priv->net_dev;
	int rx_work_done;

	/* ack interrupts */
	dmac_writel(priv, priv->dma_chan_int_mask,
		    DMAC_IR_REG, priv->rx_chan);
	dmac_writel(priv, priv->dma_chan_int_mask,
		    DMAC_IR_REG, priv->tx_chan);

	/* reclaim sent skbs */
	bcm6368_enetsw_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm6368_enetsw_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in rx/tx queue, remove device from poll
	 * queue
	 */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupts */
	dmac_writel(priv, priv->dma_chan_int_mask,
		    DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, priv->dma_chan_int_mask,
		    DMAC_IRMASK_REG, priv->tx_chan);

	return rx_work_done;
}
/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm6368_enetsw_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm6368_enetsw *priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}
/*
 * tx request callback
 */
static netdev_tx_t
bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	struct bcm6368_enetsw_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case
	 */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets */
	if (skb->len < (ETH_ZLEN + ETH_FCS_LEN)) {
		int needed = (ETH_ZLEN + ETH_FCS_LEN) - skb->len;
		char *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}

			dev_kfree_skb(skb);
			skb = nskb;
		}

		data = skb_put_zero(skb, needed);
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |
		    DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order
	 */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	netdev_sent_queue(dev, skb->len);

	/* kick tx dma */
	dmac_writel(priv, priv->dma_chan_en_mask, DMAC_CHANCFG_REG,
		    priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);

	return ret;
}
/*
 * disable dma in given channel
 */
static void bcm6368_enetsw_disable_dma(struct bcm6368_enetsw *priv, int chan)
{
	int limit;

	dmac_writel(priv, 0, DMAC_CHANCFG_REG, chan);

	limit = 1000;
	do {
		u32 val;

		val = dma_readl(priv, DMAC_CHANCFG_REG, chan);
		if (!(val & DMAC_CHANCFG_EN_MASK))
			break;

		udelay(1);
	} while (limit--);
}
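
/*
 * open callback: request irqs, allocate descriptor rings and rx buffers,
 * program the ring addresses and flow control thresholds, then enable the
 * DMA engine and the per-channel "packet done" interrupts.
 */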
static int bcm6368_enetsw_open(struct net_device *dev)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	/* mask all interrupts and request them */
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm6368_enetsw_isr_dma,
			  0, dev->name, dev);
	if (ret)
		return ret;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm6368_enetsw_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm6368_enetsw_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm6368_enetsw_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with buffers */
	priv->rx_buf = kzalloc(sizeof(unsigned char *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	dma_writel(priv, DMA_BUFALLOC_FORCE_MASK | 0,
		   DMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm6368_enetsw_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	dmas_writel(priv, priv->rx_desc_dma,
		    DMAS_RSTART_REG, priv->rx_chan);
	dmas_writel(priv, priv->tx_desc_dma,
		    DMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	dmas_writel(priv, 0, DMAS_SRAM2_REG, priv->rx_chan);
	dmas_writel(priv, 0, DMAS_SRAM2_REG, priv->tx_chan);
	dmas_writel(priv, 0, DMAS_SRAM3_REG, priv->rx_chan);
	dmas_writel(priv, 0, DMAS_SRAM3_REG, priv->tx_chan);
	dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->rx_chan);
	dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	dmac_writel(priv, priv->dma_maxburst,
		    DMAC_MAXBURST_REG, priv->rx_chan);
	dmac_writel(priv, priv->dma_maxburst,
		    DMAC_MAXBURST_REG, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	dma_writel(priv, val, DMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	dma_writel(priv, val, DMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	dma_writel(priv, DMA_CFG_EN_MASK, DMA_CFG_REG);
	dmac_writel(priv, DMAC_CHANCFG_EN_MASK,
		    DMAC_CHANCFG_REG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, priv->rx_chan);
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm6368_enetsw_desc *desc;

		if (!priv->rx_buf[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
				 DMA_FROM_DEVICE);
		skb_free_frag(priv->rx_buf[i]);
	}
	kfree(priv->rx_buf);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

	return ret;
}
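
/*
 * stop callback: quiesce the queue, NAPI and interrupts, stop both DMA
 * channels, force-reclaim queued tx skbs and release rings, buffers and
 * irqs.
 */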
static int bcm6368_enetsw_stop(struct net_device *dev)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	int i;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);

	/* disable dma & mac */
	bcm6368_enetsw_disable_dma(priv, priv->tx_chan);
	bcm6368_enetsw_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm6368_enetsw_tx_reclaim(dev, 1);

	/* free the rx buffer ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm6368_enetsw_desc *desc;

		if (!priv->rx_buf[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single_attrs(kdev, desc->address, priv->rx_buf_size,
				       DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
		skb_free_frag(priv->rx_buf[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_buf);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	netdev_reset_queue(dev);

	return 0;
}
static const struct net_device_ops bcm6368_enetsw_ops = {
	.ndo_open = bcm6368_enetsw_open,
	.ndo_stop = bcm6368_enetsw_stop,
	.ndo_start_xmit = bcm6368_enetsw_start_xmit,
};
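
/*
 * probe: map the dma, dma-channels and dma-sram register blocks, pick up
 * irqs, channel ids, clocks, resets and power domains from the device
 * tree, then register the net_device. The default MTU leaves room for the
 * switch management tag.
 */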
static int bcm6368_enetsw_probe(struct platform_device *pdev)
{
	struct bcm6368_enetsw *priv;
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct net_device *ndev;
	struct resource *res;
	unsigned int i;
	int ret;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);

	priv->num_pms = of_count_phandle_with_args(node, "power-domains",
						   "#power-domain-cells");
	if (priv->num_pms > 1) {
		priv->pm = devm_kcalloc(dev, priv->num_pms,
					sizeof(struct device *), GFP_KERNEL);
		if (!priv->pm)
			return -ENOMEM;

		priv->link_pm = devm_kcalloc(dev, priv->num_pms,
					     sizeof(struct device_link *),
					     GFP_KERNEL);
		if (!priv->link_pm)
			return -ENOMEM;

		for (i = 0; i < priv->num_pms; i++) {
			priv->pm[i] = genpd_dev_pm_attach_by_id(dev, i);
			if (IS_ERR(priv->pm[i])) {
				dev_err(dev, "error getting pm %d\n", i);
				return -EINVAL;
			}

			priv->link_pm[i] = device_link_add(dev, priv->pm[i],
				DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				DL_FLAG_RPM_ACTIVE);
		}
	}

	pm_runtime_enable(dev);
	pm_runtime_no_callbacks(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_disable(dev);
		dev_info(dev, "PM probe defer: ret=%d\n", ret);
		return -EPROBE_DEFER;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
	priv->dma_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->dma_base))
		return PTR_ERR(priv->dma_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "dma-channels");
	priv->dma_chan = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->dma_chan))
		return PTR_ERR(priv->dma_chan);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma-sram");
	priv->dma_sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->dma_sram))
		return PTR_ERR(priv->dma_sram);

	priv->irq_rx = platform_get_irq_byname(pdev, "rx");
	if (priv->irq_rx < 0)
		return priv->irq_rx;

	priv->irq_tx = platform_get_irq_byname(pdev, "tx");
	if (priv->irq_tx == -EPROBE_DEFER)
		return priv->irq_tx;
	else if (priv->irq_tx < 0)
		priv->irq_tx = -1;

	if (device_property_read_u32(dev, "dma-rx", &priv->rx_chan))
		return -ENODEV;

	if (device_property_read_u32(dev, "dma-tx", &priv->tx_chan))
		return -ENODEV;

	priv->rx_ring_size = ENETSW_DEF_RX_DESC;
	priv->tx_ring_size = ENETSW_DEF_TX_DESC;
	priv->dma_maxburst = ENETSW_DMA_MAXBURST;
	priv->copybreak = ENETSW_DEF_CPY_BREAK;
	priv->dma_chan_en_mask = DMAC_CHANCFG_EN_MASK;
	priv->dma_chan_int_mask = DMAC_IR_PKTDONE_MASK;
	priv->dma_chan_width = DMA_CHAN_WIDTH;

	of_get_mac_address(node, ndev->dev_addr);
	if (is_valid_ether_addr(ndev->dev_addr)) {
		dev_info(dev, "mtd mac %pM\n", ndev->dev_addr);
	} else {
		random_ether_addr(ndev->dev_addr);
		dev_info(dev, "random mac %pM\n", ndev->dev_addr);
	}

	priv->rx_buf_size = ALIGN(ndev->mtu + ENETSW_MTU_OVERHEAD,
				  priv->dma_maxburst * 4);

	priv->rx_frag_size = ENETSW_FRAG_SIZE(priv->rx_buf_size);

	priv->num_clocks = of_clk_get_parent_count(node);
	if (priv->num_clocks) {
		priv->clock = devm_kcalloc(dev, priv->num_clocks,
					   sizeof(struct clk *), GFP_KERNEL);
		if (!priv->clock)
			return -ENOMEM;
	}
	for (i = 0; i < priv->num_clocks; i++) {
		priv->clock[i] = of_clk_get(node, i);
		if (IS_ERR(priv->clock[i])) {
			dev_err(dev, "error getting clock %d\n", i);
			return -EINVAL;
		}

		ret = clk_prepare_enable(priv->clock[i]);
		if (ret) {
			dev_err(dev, "error enabling clock %d\n", i);
			return ret;
		}
	}

	priv->num_resets = of_count_phandle_with_args(node, "resets",
						      "#reset-cells");
	if (priv->num_resets) {
		priv->reset = devm_kcalloc(dev, priv->num_resets,
					   sizeof(struct reset_control *),
					   GFP_KERNEL);
		if (!priv->reset)
			return -ENOMEM;
	}
	for (i = 0; i < priv->num_resets; i++) {
		priv->reset[i] = devm_reset_control_get_by_index(dev, i);
		if (IS_ERR(priv->reset[i])) {
			dev_err(dev, "error getting reset %d\n", i);
			return -EINVAL;
		}

		ret = reset_control_reset(priv->reset[i]);
		if (ret) {
			dev_err(dev, "error performing reset %d\n", i);
			return ret;
		}
	}

	spin_lock_init(&priv->rx_lock);

	timer_setup(&priv->rx_timeout, bcm6368_enetsw_refill_rx_timer, 0);

	/* register netdevice */
	ndev->netdev_ops = &bcm6368_enetsw_ops;
	ndev->min_mtu = ETH_ZLEN;
	ndev->mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE;
	ndev->max_mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE;
	netif_napi_add(ndev, &priv->napi, bcm6368_enetsw_poll, 16);
	SET_NETDEV_DEV(ndev, dev);

	ret = register_netdev(ndev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(ndev);
	platform_set_drvdata(pdev, ndev);
	priv->pdev = pdev;
	priv->net_dev = ndev;

	return 0;

out_disable_clk:
	for (i = 0; i < priv->num_resets; i++)
		reset_control_assert(priv->reset[i]);

	for (i = 0; i < priv->num_clocks; i++)
		clk_disable_unprepare(priv->clock[i]);

	return ret;
}
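
/*
 * remove: unregister the netdevice and release the PM domains, resets and
 * clocks acquired at probe time.
 */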
static int bcm6368_enetsw_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bcm6368_enetsw *priv = netdev_priv(ndev);
	unsigned int i;

	unregister_netdev(ndev);

	pm_runtime_put_sync(dev);
	for (i = 0; priv->pm && i < priv->num_pms; i++) {
		dev_pm_domain_detach(priv->pm[i], true);
		device_link_del(priv->link_pm[i]);
	}

	for (i = 0; i < priv->num_resets; i++)
		reset_control_assert(priv->reset[i]);

	for (i = 0; i < priv->num_clocks; i++)
		clk_disable_unprepare(priv->clock[i]);

	free_netdev(ndev);

	return 0;
}
static const struct of_device_id bcm6368_enetsw_of_match[] = {
	{ .compatible = "brcm,bcm6318-enetsw", },
	{ .compatible = "brcm,bcm6328-enetsw", },
	{ .compatible = "brcm,bcm6362-enetsw", },
	{ .compatible = "brcm,bcm6368-enetsw", },
	{ .compatible = "brcm,bcm63268-enetsw", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm6368_enetsw_of_match);
static struct platform_driver bcm6368_enetsw_driver = {
	.driver = {
		.name = "bcm6368-enetsw",
		.of_match_table = of_match_ptr(bcm6368_enetsw_of_match),
	},
	.probe = bcm6368_enetsw_probe,
	.remove = bcm6368_enetsw_remove,
};
module_platform_driver(bcm6368_enetsw_driver);