// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BCM6368 Ethernet Switch Controller Driver
 *
 * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 Jonas Gorski <jonas.gorski@gmail.com>
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/of_clk.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/version.h>
/* TODO: Bigger frames may work but we do not trust that they are safe on all
 * platforms so more research is needed, a max frame size of 2048 has been
 * tested. We use the safe frame size 1542 which is 1532 plus DSA and VLAN
 * overhead.
 */
#define ENETSW_MAX_FRAME		1542
#define ENETSW_DSA_TAG_SIZE		6
/* The MTU in Linux does not include ethernet or VLAN headers, but it DOES
 * include the DSA overhead (the framework will increase the MTU to fit
 * the DSA tag).
 */
#define ENETSW_MAX_MTU			(ENETSW_MAX_FRAME - VLAN_ETH_HLEN - \
					 VLAN_HLEN)
#define ENETSW_FRAG_SIZE(x)		(SKB_DATA_ALIGN(NET_SKB_PAD + x + \
					 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
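
/* ENETSW_FRAG_SIZE() is the size of the page fragment backing one rx buffer:
 * NET_SKB_PAD of headroom, the DMA area itself and the trailing
 * struct skb_shared_info, so the fragment can later be handed to
 * napi_build_skb() without copying.
 */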
/* default number of descriptors */
#define ENETSW_DEF_RX_DESC		64
#define ENETSW_DEF_TX_DESC		32
#define ENETSW_DEF_CPY_BREAK		128

/* maximum burst len for dma (in units of 4 bytes) */
#define ENETSW_DMA_MAXBURST		8
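
/* The burst length is programmed into DMAC_MAXBURST_REG for both channels in
 * bcm6368_enetsw_open(); rx buffers are sized to a multiple of
 * ENETSW_DMA_MAXBURST * 4 bytes in the probe routine.
 */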
#define DMA_CHAN_WIDTH			0x10

/* Controller Configuration Register */
#define DMA_CFG_REG			0x0
#define DMA_CFG_EN_SHIFT		0
#define DMA_CFG_EN_MASK			(1 << DMA_CFG_EN_SHIFT)
#define DMA_CFG_FLOWCH_MASK(x)		(1 << ((x >> 1) + 1))

/* Flow Control Descriptor Low Threshold register */
#define DMA_FLOWCL_REG(x)		(0x4 + (x) * 6)

/* Flow Control Descriptor High Threshold register */
#define DMA_FLOWCH_REG(x)		(0x8 + (x) * 6)

/* Flow Control Descriptor Buffer Allocation Threshold register */
#define DMA_BUFALLOC_REG(x)		(0xc + (x) * 6)
#define DMA_BUFALLOC_FORCE_SHIFT	31
#define DMA_BUFALLOC_FORCE_MASK		(1 << DMA_BUFALLOC_FORCE_SHIFT)
/* Channel Configuration register */
#define DMAC_CHANCFG_REG		0x0
#define DMAC_CHANCFG_EN_SHIFT		0
#define DMAC_CHANCFG_EN_MASK		(1 << DMAC_CHANCFG_EN_SHIFT)
#define DMAC_CHANCFG_PKTHALT_SHIFT	1
#define DMAC_CHANCFG_PKTHALT_MASK	(1 << DMAC_CHANCFG_PKTHALT_SHIFT)
#define DMAC_CHANCFG_BUFHALT_SHIFT	2
#define DMAC_CHANCFG_BUFHALT_MASK	(1 << DMAC_CHANCFG_BUFHALT_SHIFT)
#define DMAC_CHANCFG_CHAINING_SHIFT	2
#define DMAC_CHANCFG_CHAINING_MASK	(1 << DMAC_CHANCFG_CHAINING_SHIFT)
#define DMAC_CHANCFG_WRAP_EN_SHIFT	3
#define DMAC_CHANCFG_WRAP_EN_MASK	(1 << DMAC_CHANCFG_WRAP_EN_SHIFT)
#define DMAC_CHANCFG_FLOWC_EN_SHIFT	4
#define DMAC_CHANCFG_FLOWC_EN_MASK	(1 << DMAC_CHANCFG_FLOWC_EN_SHIFT)

/* Interrupt Control/Status register */
#define DMAC_IR_REG			0x4
#define DMAC_IR_BUFDONE_MASK		(1 << 0)
#define DMAC_IR_PKTDONE_MASK		(1 << 1)
#define DMAC_IR_NOTOWNER_MASK		(1 << 2)

/* Interrupt Mask register */
#define DMAC_IRMASK_REG			0x8

/* Maximum Burst Length */
#define DMAC_MAXBURST_REG		0xc

/* Ring Start Address register */
#define DMAS_RSTART_REG			0x0

/* State Ram Word 2 */
#define DMAS_SRAM2_REG			0x4

/* State Ram Word 3 */
#define DMAS_SRAM3_REG			0x8

/* State Ram Word 4 */
#define DMAS_SRAM4_REG			0xc
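
/* Hardware DMA descriptor: a buffer address plus a combined length/status
 * word. The DMADESC_* masks below describe the len_stat layout used by both
 * the rx and tx rings.
 */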
struct bcm6368_enetsw_desc {
	u32 len_stat;
	u32 address;
};

#define DMADESC_LENGTH_SHIFT	16
#define DMADESC_LENGTH_MASK	(0xfff << DMADESC_LENGTH_SHIFT)
#define DMADESC_OWNER_MASK	(1 << 15)
#define DMADESC_EOP_MASK	(1 << 14)
#define DMADESC_SOP_MASK	(1 << 13)
#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK	(1 << 12)
#define DMADESC_USB_NOZERO_MASK	(1 << 1)
#define DMADESC_USB_ZERO_MASK	(1 << 0)

#define DMADESC_UNDER_MASK	(1 << 9)
#define DMADESC_APPEND_CRC	(1 << 8)
#define DMADESC_OVSIZE_MASK	(1 << 4)
#define DMADESC_RXER_MASK	(1 << 2)
#define DMADESC_CRC_MASK	(1 << 1)
#define DMADESC_OV_MASK		(1 << 0)
#define DMADESC_ERR_MASK	(DMADESC_UNDER_MASK | \
				 DMADESC_OVSIZE_MASK | \
				 DMADESC_RXER_MASK | \
				 DMADESC_CRC_MASK | \
				 DMADESC_OV_MASK)
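
/* Driver private state: register windows, clock/reset/power-domain handles
 * and the rx/tx ring bookkeeping shared between the xmit, napi and timer
 * paths.
 */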
struct bcm6368_enetsw {
	void __iomem *dma_base;
	void __iomem *dma_chan;
	void __iomem *dma_sram;

	struct device **pm;
	struct device_link **link_pm;
	int num_pms;

	struct clk **clock;
	unsigned int num_clocks;

	struct reset_control **reset;
	unsigned int num_resets;

	int copybreak;

	int irq_rx;
	int irq_tx;

	/* hw view of rx & tx dma ring */
	dma_addr_t rx_desc_dma;
	dma_addr_t tx_desc_dma;

	/* allocated size (in bytes) for rx & tx dma ring */
	unsigned int rx_desc_alloc_size;
	unsigned int tx_desc_alloc_size;

	struct napi_struct napi;

	/* dma channel id for rx */
	u32 rx_chan;

	/* number of dma desc in rx ring */
	int rx_ring_size;

	/* cpu view of rx dma ring */
	struct bcm6368_enetsw_desc *rx_desc_cpu;

	/* current number of armed descriptors given to hardware for rx */
	int rx_desc_count;

	/* next rx descriptor to fetch from hardware */
	int rx_curr_desc;

	/* next dirty rx descriptor to refill */
	int rx_dirty_desc;

	/* size of allocated rx buffer */
	unsigned int rx_buf_size;

	/* size of allocated rx frag */
	unsigned int rx_frag_size;

	/* list of buffers given to hw for rx */
	unsigned char **rx_buf;

	/* used when rx buffer allocation failed, so we defer rx queue
	 * refill */
	struct timer_list rx_timeout;

	/* lock rx_timeout against rx normal operation */
	spinlock_t rx_lock;

	/* dma channel id for tx */
	u32 tx_chan;

	/* number of dma desc in tx ring */
	int tx_ring_size;

	/* cpu view of tx dma ring */
	struct bcm6368_enetsw_desc *tx_desc_cpu;

	/* number of available descriptors for tx */
	int tx_desc_count;

	/* next tx descriptor available */
	int tx_curr_desc;

	/* next dirty tx descriptor to reclaim */
	int tx_dirty_desc;

	/* list of skb given to hw for tx */
	struct sk_buff **tx_skb;

	/* lock used by tx reclaim and xmit */
	spinlock_t tx_lock;

	/* network device reference */
	struct net_device *net_dev;

	/* platform device reference */
	struct platform_device *pdev;
};
static inline void dma_writel(struct bcm6368_enetsw *priv, u32 val, u32 off)
{
	__raw_writel(val, priv->dma_base + off);
}

static inline u32 dma_readl(struct bcm6368_enetsw *priv, u32 off, int chan)
{
	return __raw_readl(priv->dma_chan + off + chan * DMA_CHAN_WIDTH);
}

static inline void dmac_writel(struct bcm6368_enetsw *priv, u32 val, u32 off,
			       int chan)
{
	__raw_writel(val, priv->dma_chan + off + chan * DMA_CHAN_WIDTH);
}

static inline void dmas_writel(struct bcm6368_enetsw *priv, u32 val,
			       u32 off, int chan)
{
	__raw_writel(val, priv->dma_sram + off + chan * DMA_CHAN_WIDTH);
}
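
/*
 * refill rx queue: (re)allocate buffers for empty slots and hand the
 * descriptors back to the DMA engine; called from the napi poll loop
 * (napi_mode) and from the deferred refill timer.
 */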
static int bcm6368_enetsw_refill_rx(struct net_device *ndev, bool napi_mode)
{
	struct bcm6368_enetsw *priv = netdev_priv(ndev);
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm6368_enetsw_desc *desc;
		unsigned int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_buf[desc_idx]) {
			unsigned char *buf;
			dma_addr_t p;

			if (likely(napi_mode))
				buf = napi_alloc_frag(priv->rx_frag_size);
			else
				buf = netdev_alloc_frag(priv->rx_frag_size);

			if (unlikely(!buf))
				break;

			p = dma_map_single(dev, buf + NET_SKB_PAD,
					   priv->rx_buf_size, DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(dev, p))) {
				skb_free_frag(buf);
				break;
			}

			priv->rx_buf[desc_idx] = buf;
			desc->address = p;
		}

		len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}

		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		dma_writel(priv, 1, DMA_BUFALLOC_REG(priv->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(ndev)) {
		dev_warn(dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}
/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm6368_enetsw_refill_rx_timer(struct timer_list *t)
{
	struct bcm6368_enetsw *priv = from_timer(priv, t, rx_timeout);
	struct net_device *ndev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm6368_enetsw_refill_rx(ndev, false);
	spin_unlock(&priv->rx_lock);
}
/*
 * extract packet from rx queue
 */
static int bcm6368_enetsw_receive_queue(struct net_device *ndev, int budget)
{
	struct bcm6368_enetsw *priv = netdev_priv(ndev);
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	struct list_head rx_list;
	struct sk_buff *skb;
	int processed = 0;

	INIT_LIST_HEAD(&rx_list);

	/* don't scan ring further than number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm6368_enetsw_desc *desc;
		unsigned int frag_size;
		unsigned int desc_idx;
		unsigned char *buf;
		int len;
		u32 len_stat;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			ndev->stats.rx_dropped++;
			continue;
		}

		/* valid packet */
		buf = priv->rx_buf[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK)
		      >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= ETH_FCS_LEN;

		if (len < priv->copybreak) {
			unsigned int nfrag_size = ENETSW_FRAG_SIZE(len);
			unsigned char *nbuf = napi_alloc_frag(nfrag_size);

			if (unlikely(!nbuf)) {
				/* forget packet, just rearm desc */
				ndev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(dev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nbuf + NET_SKB_PAD, buf + NET_SKB_PAD, len);
			dma_sync_single_for_device(dev, desc->address,
						   len, DMA_FROM_DEVICE);
			buf = nbuf;
			frag_size = nfrag_size;
		} else {
			dma_unmap_single(dev, desc->address,
					 priv->rx_buf_size, DMA_FROM_DEVICE);
			priv->rx_buf[desc_idx] = NULL;
			frag_size = priv->rx_frag_size;
		}

		skb = napi_build_skb(buf, frag_size);
		if (unlikely(!skb)) {
			kfree(buf);
			ndev->stats.rx_dropped++;
			continue;
		}

		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		list_add_tail(&skb->list, &rx_list);
	} while (processed < budget);

	list_for_each_entry(skb, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);
	priv->rx_desc_count -= processed;

	if (processed || !priv->rx_desc_count) {
		bcm6368_enetsw_refill_rx(ndev, true);

		/* kick rx dma */
		dmac_writel(priv, DMAC_CHANCFG_EN_MASK,
			    DMAC_CHANCFG_REG, priv->rx_chan);
	}

	return processed;
}
/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm6368_enetsw_tx_reclaim(struct net_device *ndev, int force,
				     int budget)
{
	struct bcm6368_enetsw *priv = netdev_priv(ndev);
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	unsigned int bytes = 0;
	int released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm6368_enetsw_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			ndev->stats.tx_errors++;

		bytes += skb->len;
		napi_consume_skb(skb, budget);
		released++;
	}

	netdev_completed_queue(ndev, released, bytes);

	if (netif_queue_stopped(ndev) && released)
		netif_wake_queue(ndev);

	return released;
}
/*
 * poll func, called by network core
 */
static int bcm6368_enetsw_poll(struct napi_struct *napi, int budget)
{
	struct bcm6368_enetsw *priv = container_of(napi, struct bcm6368_enetsw, napi);
	struct net_device *ndev = priv->net_dev;
	int rx_work_done;

	/* ack rx/tx interrupts */
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, priv->rx_chan);
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, priv->tx_chan);

	/* reclaim sent skb */
	bcm6368_enetsw_tx_reclaim(ndev, 0, budget);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm6368_enetsw_receive_queue(ndev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packet in rx/tx queue, remove device from poll
	 * queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupt */
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, priv->tx_chan);

	return rx_work_done;
}
/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm6368_enetsw_isr_dma(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct bcm6368_enetsw *priv = netdev_priv(ndev);

	/* mask rx/tx interrupts */
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}
/*
 * tx request callback
 */
static netdev_tx_t
bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct bcm6368_enetsw *priv = netdev_priv(ndev);
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	struct bcm6368_enetsw_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;
	dma_addr_t p;

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(ndev);
		dev_err(dev, "xmit called with no tx desc available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets */
	if (skb->len < (ETH_ZLEN + ETH_FCS_LEN)) {
		int needed = (ETH_ZLEN + ETH_FCS_LEN) - skb->len;
		char *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}

			dev_kfree_skb(skb);
			skb = nskb;
		}

		data = skb_put_zero(skb, needed);
	}

	/* fill descriptor */
	p = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, p))) {
		dev_kfree_skb(skb);
		ret = NETDEV_TX_OK;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;
	desc->address = p;

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |
		    DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	netdev_sent_queue(ndev, skb->len);

	/* kick tx dma */
	dmac_writel(priv, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG,
		    priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(ndev);

	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);

	return ret;
}
/*
 * disable dma in given channel
 */
static void bcm6368_enetsw_disable_dma(struct bcm6368_enetsw *priv, int chan)
{
	int limit = 1000;

	dmac_writel(priv, 0, DMAC_CHANCFG_REG, chan);

	do {
		u32 val;

		val = dma_readl(priv, DMAC_CHANCFG_REG, chan);
		if (!(val & DMAC_CHANCFG_EN_MASK))
			break;

		udelay(1);
	} while (limit--);
}
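
/*
 * open callback: request the DMA irqs, allocate rings and rx buffers,
 * program the DMA engine and enable napi before unmasking interrupts.
 */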
static int bcm6368_enetsw_open(struct net_device *ndev)
{
	struct bcm6368_enetsw *priv = netdev_priv(ndev);
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	/* mask all interrupts and request them */
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm6368_enetsw_isr_dma,
			  0, ndev->name, ndev);
	if (ret)
		goto out_freeirq;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm6368_enetsw_isr_dma,
				  0, ndev->name, ndev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm6368_enetsw_desc);
	p = dma_alloc_coherent(dev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(dev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm6368_enetsw_desc);
	p = dma_alloc_coherent(dev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(dev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(dev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with buffers */
	priv->rx_buf = kzalloc(sizeof(unsigned char *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		dev_err(dev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	dma_writel(priv, DMA_BUFALLOC_FORCE_MASK | 0,
		   DMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm6368_enetsw_refill_rx(ndev, false)) {
		dev_err(dev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	dmas_writel(priv, priv->rx_desc_dma,
		    DMAS_RSTART_REG, priv->rx_chan);
	dmas_writel(priv, priv->tx_desc_dma,
		    DMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	dmas_writel(priv, 0, DMAS_SRAM2_REG, priv->rx_chan);
	dmas_writel(priv, 0, DMAS_SRAM2_REG, priv->tx_chan);
	dmas_writel(priv, 0, DMAS_SRAM3_REG, priv->rx_chan);
	dmas_writel(priv, 0, DMAS_SRAM3_REG, priv->tx_chan);
	dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->rx_chan);
	dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	dmac_writel(priv, ENETSW_DMA_MAXBURST,
		    DMAC_MAXBURST_REG, priv->rx_chan);
	dmac_writel(priv, ENETSW_DMA_MAXBURST,
		    DMAC_MAXBURST_REG, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	dma_writel(priv, val, DMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	dma_writel(priv, val, DMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	dma_writel(priv, DMA_CFG_EN_MASK, DMA_CFG_REG);
	dmac_writel(priv, DMAC_CHANCFG_EN_MASK,
		    DMAC_CHANCFG_REG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, priv->rx_chan);
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, priv->tx_chan);

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm6368_enetsw_desc *desc;

		if (!priv->rx_buf[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(dev, desc->address, priv->rx_buf_size,
				 DMA_FROM_DEVICE);
		skb_free_frag(priv->rx_buf[i]);
	}
	kfree(priv->rx_buf);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(dev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(dev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, ndev);

out_freeirq_rx:
	free_irq(priv->irq_rx, ndev);

out_freeirq:
	return ret;
}
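
/*
 * stop callback: reverse of open(), quiesce napi and the DMA channels, then
 * release buffers, rings and irqs.
 */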
static int bcm6368_enetsw_stop(struct net_device *ndev)
{
	struct bcm6368_enetsw *priv = netdev_priv(ndev);
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int i;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);

	/* disable dma & mac */
	bcm6368_enetsw_disable_dma(priv, priv->tx_chan);
	bcm6368_enetsw_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm6368_enetsw_tx_reclaim(ndev, 1, 0);

	/* free the rx buffer ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm6368_enetsw_desc *desc;

		if (!priv->rx_buf[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single_attrs(dev, desc->address, priv->rx_buf_size,
				       DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
		skb_free_frag(priv->rx_buf[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_buf);
	kfree(priv->tx_skb);
	dma_free_coherent(dev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(dev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, ndev);
	free_irq(priv->irq_rx, ndev);

	netdev_reset_queue(ndev);

	return 0;
}
static const struct net_device_ops bcm6368_enetsw_ops = {
	.ndo_open = bcm6368_enetsw_open,
	.ndo_stop = bcm6368_enetsw_stop,
	.ndo_start_xmit = bcm6368_enetsw_start_xmit,
};
static int bcm6368_enetsw_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct bcm6368_enetsw *priv;
	struct net_device *ndev;
	struct resource *res;
	unsigned char dev_addr[ETH_ALEN];
	unsigned int i;
	int num_resets;
	int ret;

	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, dev);

	priv = netdev_priv(ndev);

	priv->net_dev = ndev;
	priv->pdev = pdev;

	priv->num_pms = of_count_phandle_with_args(node, "power-domains",
						   "#power-domain-cells");
	if (priv->num_pms > 1) {
		priv->pm = devm_kcalloc(dev, priv->num_pms,
					sizeof(struct device *), GFP_KERNEL);
		if (!priv->pm)
			return -ENOMEM;

		priv->link_pm = devm_kcalloc(dev, priv->num_pms,
					     sizeof(struct device_link *),
					     GFP_KERNEL);
		if (!priv->link_pm)
			return -ENOMEM;

		for (i = 0; i < priv->num_pms; i++) {
			priv->pm[i] = genpd_dev_pm_attach_by_id(dev, i);
			if (IS_ERR(priv->pm[i])) {
				dev_err(dev, "error getting pm %d\n", i);
				return PTR_ERR(priv->pm[i]);
			}

			priv->link_pm[i] = device_link_add(dev, priv->pm[i],
				DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				DL_FLAG_RPM_ACTIVE);
		}
	}

	pm_runtime_enable(dev);
	pm_runtime_no_callbacks(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_disable(dev);
		dev_info(dev, "PM probe defer: ret=%d\n", ret);
		return -EPROBE_DEFER;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
	priv->dma_base = devm_ioremap_resource(dev, res);
	if (IS_ERR_OR_NULL(priv->dma_base))
		return PTR_ERR(priv->dma_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "dma-channels");
	priv->dma_chan = devm_ioremap_resource(dev, res);
	if (IS_ERR_OR_NULL(priv->dma_chan))
		return PTR_ERR(priv->dma_chan);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma-sram");
	priv->dma_sram = devm_ioremap_resource(dev, res);
	if (IS_ERR_OR_NULL(priv->dma_sram))
		return PTR_ERR(priv->dma_sram);

	priv->irq_rx = platform_get_irq_byname(pdev, "rx");
	if (priv->irq_rx < 0)
		return priv->irq_rx;

	priv->irq_tx = platform_get_irq_byname(pdev, "tx");
	if (!priv->irq_tx)
		return -ENODEV;
	else if (priv->irq_tx < 0)
		priv->irq_tx = -1;

	if (device_property_read_u32(dev, "dma-rx", &priv->rx_chan))
		return -ENODEV;

	if (device_property_read_u32(dev, "dma-tx", &priv->tx_chan))
		return -ENODEV;

	priv->rx_ring_size = ENETSW_DEF_RX_DESC;
	priv->tx_ring_size = ENETSW_DEF_TX_DESC;
	priv->copybreak = ENETSW_DEF_CPY_BREAK;

	of_get_mac_address(node, dev_addr);
	if (is_valid_ether_addr(dev_addr)) {
		dev_addr_set(ndev, dev_addr);
		dev_info(dev, "mtd mac %pM\n", dev_addr);
	} else {
		eth_hw_addr_random(ndev);
		dev_info(dev, "random mac\n");
	}

	priv->rx_buf_size = ALIGN(ENETSW_MAX_FRAME,
				  ENETSW_DMA_MAXBURST * 4);

	priv->rx_frag_size = ENETSW_FRAG_SIZE(priv->rx_buf_size);

	priv->num_clocks = of_clk_get_parent_count(node);
	if (priv->num_clocks) {
		priv->clock = devm_kcalloc(dev, priv->num_clocks,
					   sizeof(struct clk *), GFP_KERNEL);
		if (IS_ERR_OR_NULL(priv->clock))
			return PTR_ERR(priv->clock);
	}
	for (i = 0; i < priv->num_clocks; i++) {
		priv->clock[i] = of_clk_get(node, i);
		if (IS_ERR(priv->clock[i])) {
			dev_err(dev, "error getting clock %d\n", i);
			return PTR_ERR(priv->clock[i]);
		}

		ret = clk_prepare_enable(priv->clock[i]);
		if (ret) {
			dev_err(dev, "error enabling clock %d\n", i);
			return ret;
		}
	}

	num_resets = of_count_phandle_with_args(node, "resets",
						"#reset-cells");
	if (num_resets > 0)
		priv->num_resets = num_resets;
	else
		priv->num_resets = 0;
	if (priv->num_resets) {
		priv->reset = devm_kcalloc(dev, priv->num_resets,
					   sizeof(struct reset_control *),
					   GFP_KERNEL);
		if (IS_ERR_OR_NULL(priv->reset))
			return PTR_ERR(priv->reset);
	}
	for (i = 0; i < priv->num_resets; i++) {
		priv->reset[i] = devm_reset_control_get_by_index(dev, i);
		if (IS_ERR(priv->reset[i])) {
			dev_err(dev, "error getting reset %d\n", i);
			return PTR_ERR(priv->reset[i]);
		}

		ret = reset_control_reset(priv->reset[i]);
		if (ret) {
			dev_err(dev, "error performing reset %d\n", i);
			return ret;
		}
	}

	spin_lock_init(&priv->rx_lock);

	timer_setup(&priv->rx_timeout, bcm6368_enetsw_refill_rx_timer, 0);

	/* register netdevice */
	ndev->netdev_ops = &bcm6368_enetsw_ops;
	ndev->min_mtu = ETH_ZLEN;
	ndev->mtu = ETH_DATA_LEN;
	ndev->max_mtu = ENETSW_MAX_MTU;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6,1,0)
	netif_napi_add(ndev, &priv->napi, bcm6368_enetsw_poll);
#else
	netif_napi_add(ndev, &priv->napi, bcm6368_enetsw_poll, 16);
#endif

	ret = devm_register_netdev(dev, ndev);
	if (ret) {
		netif_napi_del(&priv->napi);
		goto out_disable_clk;
	}

	netif_carrier_off(ndev);

	dev_info(dev, "%s at 0x%px, IRQ %d\n", ndev->name, priv->dma_base, ndev->irq);

	return 0;

out_disable_clk:
	for (i = 0; i < priv->num_resets; i++)
		reset_control_assert(priv->reset[i]);

	for (i = 0; i < priv->num_clocks; i++)
		clk_disable_unprepare(priv->clock[i]);

	return ret;
}
static int bcm6368_enetsw_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bcm6368_enetsw *priv = netdev_priv(ndev);
	unsigned int i;

	pm_runtime_put_sync(dev);
	for (i = 0; priv->pm && i < priv->num_pms; i++) {
		dev_pm_domain_detach(priv->pm[i], true);
		device_link_del(priv->link_pm[i]);
	}

	for (i = 0; i < priv->num_resets; i++)
		reset_control_assert(priv->reset[i]);

	for (i = 0; i < priv->num_clocks; i++)
		clk_disable_unprepare(priv->clock[i]);

	return 0;
}
static const struct of_device_id bcm6368_enetsw_of_match[] = {
	{ .compatible = "brcm,bcm6318-enetsw", },
	{ .compatible = "brcm,bcm6328-enetsw", },
	{ .compatible = "brcm,bcm6362-enetsw", },
	{ .compatible = "brcm,bcm6368-enetsw", },
	{ .compatible = "brcm,bcm63268-enetsw", },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, bcm6368_enetsw_of_match);

static struct platform_driver bcm6368_enetsw_driver = {
	.driver = {
		.name = "bcm6368-enetsw",
		.of_match_table = of_match_ptr(bcm6368_enetsw_of_match),
	},
	.probe = bcm6368_enetsw_probe,
	.remove = bcm6368_enetsw_remove,
};

module_platform_driver(bcm6368_enetsw_driver);

MODULE_AUTHOR("Álvaro Fernández Rojas <noltari@gmail.com>");
MODULE_DESCRIPTION("BCM6368 Ethernet Switch Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:bcm6368-enetsw");