// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
 * MTK HSDMA support
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/of_device.h>

#include "../virt-dma.h"
23 #define HSDMA_BASE_OFFSET 0x800
25 #define HSDMA_REG_TX_BASE 0x00
26 #define HSDMA_REG_TX_CNT 0x04
27 #define HSDMA_REG_TX_CTX 0x08
28 #define HSDMA_REG_TX_DTX 0x0c
29 #define HSDMA_REG_RX_BASE 0x100
30 #define HSDMA_REG_RX_CNT 0x104
31 #define HSDMA_REG_RX_CRX 0x108
32 #define HSDMA_REG_RX_DRX 0x10c
33 #define HSDMA_REG_INFO 0x200
34 #define HSDMA_REG_GLO_CFG 0x204
35 #define HSDMA_REG_RST_CFG 0x208
36 #define HSDMA_REG_DELAY_INT 0x20c
37 #define HSDMA_REG_FREEQ_THRES 0x210
38 #define HSDMA_REG_INT_STATUS 0x220
39 #define HSDMA_REG_INT_MASK 0x228
40 #define HSDMA_REG_SCH_Q01 0x280
41 #define HSDMA_REG_SCH_Q23 0x284
43 #define HSDMA_DESCS_MAX 0xfff
44 #define HSDMA_DESCS_NUM 8
45 #define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
46 #define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
49 #define HSDMA_INFO_INDEX_MASK 0xf
50 #define HSDMA_INFO_INDEX_SHIFT 24
51 #define HSDMA_INFO_BASE_MASK 0xff
52 #define HSDMA_INFO_BASE_SHIFT 16
53 #define HSDMA_INFO_RX_MASK 0xff
54 #define HSDMA_INFO_RX_SHIFT 8
55 #define HSDMA_INFO_TX_MASK 0xff
56 #define HSDMA_INFO_TX_SHIFT 0
58 /* HSDMA_REG_GLO_CFG */
59 #define HSDMA_GLO_TX_2B_OFFSET BIT(31)
60 #define HSDMA_GLO_CLK_GATE BIT(30)
61 #define HSDMA_GLO_BYTE_SWAP BIT(29)
62 #define HSDMA_GLO_MULTI_DMA BIT(10)
63 #define HSDMA_GLO_TWO_BUF BIT(9)
64 #define HSDMA_GLO_32B_DESC BIT(8)
65 #define HSDMA_GLO_BIG_ENDIAN BIT(7)
66 #define HSDMA_GLO_TX_DONE BIT(6)
67 #define HSDMA_GLO_BT_MASK 0x3
68 #define HSDMA_GLO_BT_SHIFT 4
69 #define HSDMA_GLO_RX_BUSY BIT(3)
70 #define HSDMA_GLO_RX_DMA BIT(2)
71 #define HSDMA_GLO_TX_BUSY BIT(1)
72 #define HSDMA_GLO_TX_DMA BIT(0)
74 #define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
75 #define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
76 #define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
77 #define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
79 #define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
80 HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
82 /* HSDMA_REG_RST_CFG */
83 #define HSDMA_RST_RX_SHIFT 16
84 #define HSDMA_RST_TX_SHIFT 0
86 /* HSDMA_REG_DELAY_INT */
87 #define HSDMA_DELAY_INT_EN BIT(15)
88 #define HSDMA_DELAY_PEND_OFFSET 8
89 #define HSDMA_DELAY_TIME_OFFSET 0
90 #define HSDMA_DELAY_TX_OFFSET 16
91 #define HSDMA_DELAY_RX_OFFSET 0
93 #define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
94 ((x) << HSDMA_DELAY_PEND_OFFSET))
95 #define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
96 HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
98 /* HSDMA_REG_INT_STATUS */
99 #define HSDMA_INT_DELAY_RX_COH BIT(31)
100 #define HSDMA_INT_DELAY_RX_INT BIT(30)
101 #define HSDMA_INT_DELAY_TX_COH BIT(29)
102 #define HSDMA_INT_DELAY_TX_INT BIT(28)
103 #define HSDMA_INT_RX_MASK 0x3
104 #define HSDMA_INT_RX_SHIFT 16
105 #define HSDMA_INT_RX_Q0 BIT(16)
106 #define HSDMA_INT_TX_MASK 0xf
107 #define HSDMA_INT_TX_SHIFT 0
108 #define HSDMA_INT_TX_Q0 BIT(0)
110 /* tx/rx dma desc flags */
111 #define HSDMA_PLEN_MASK 0x3fff
112 #define HSDMA_DESC_DONE BIT(31)
113 #define HSDMA_DESC_LS0 BIT(30)
114 #define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
115 #define HSDMA_DESC_TAG BIT(15)
116 #define HSDMA_DESC_LS1 BIT(14)
117 #define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
120 #define HSDMA_ALIGN_SIZE 3
121 /* align size 128bytes */
122 #define HSDMA_MAX_PLEN 0x3f80
131 struct mtk_hsdma_sg
{
137 struct mtk_hsdma_desc
{
138 struct virt_dma_desc vdesc
;
139 unsigned int num_sgs
;
140 struct mtk_hsdma_sg sg
[1];
143 struct mtk_hsdma_chan
{
144 struct virt_dma_chan vchan
;
146 dma_addr_t desc_addr
;
149 struct hsdma_desc
*tx_ring
;
150 struct hsdma_desc
*rx_ring
;
151 struct mtk_hsdma_desc
*desc
;
152 unsigned int next_sg
;
155 struct mtk_hsdam_engine
{
156 struct dma_device ddev
;
157 struct device_dma_parameters dma_parms
;
159 struct tasklet_struct task
;
160 volatile unsigned long chan_issued
;
162 struct mtk_hsdma_chan chan
[1];
165 static inline struct mtk_hsdam_engine
*mtk_hsdma_chan_get_dev(struct mtk_hsdma_chan
*chan
)
167 return container_of(chan
->vchan
.chan
.device
, struct mtk_hsdam_engine
,
171 static inline struct mtk_hsdma_chan
*to_mtk_hsdma_chan(struct dma_chan
*c
)
173 return container_of(c
, struct mtk_hsdma_chan
, vchan
.chan
);
176 static inline struct mtk_hsdma_desc
*to_mtk_hsdma_desc(struct virt_dma_desc
*vdesc
)
178 return container_of(vdesc
, struct mtk_hsdma_desc
, vdesc
);
181 static inline u32
mtk_hsdma_read(struct mtk_hsdam_engine
*hsdma
, u32 reg
)
183 return readl(hsdma
->base
+ reg
);
186 static inline void mtk_hsdma_write(struct mtk_hsdam_engine
*hsdma
,
187 unsigned int reg
, u32 val
)
189 writel(val
, hsdma
->base
+ reg
);
192 static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine
*hsdma
,
193 struct mtk_hsdma_chan
*chan
)
196 chan
->rx_idx
= HSDMA_DESCS_NUM
- 1;
198 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_CTX
, chan
->tx_idx
);
199 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_CRX
, chan
->rx_idx
);
201 mtk_hsdma_write(hsdma
, HSDMA_REG_RST_CFG
,
202 0x1 << (chan
->id
+ HSDMA_RST_TX_SHIFT
));
203 mtk_hsdma_write(hsdma
, HSDMA_REG_RST_CFG
,
204 0x1 << (chan
->id
+ HSDMA_RST_RX_SHIFT
));
207 static void hsdma_dump_reg(struct mtk_hsdam_engine
*hsdma
)
209 dev_dbg(hsdma
->ddev
.dev
,
210 "tbase %08x, tcnt %08x, tctx %08x, tdtx: %08x, rbase %08x, rcnt %08x, rctx %08x, rdtx %08x\n",
211 mtk_hsdma_read(hsdma
, HSDMA_REG_TX_BASE
),
212 mtk_hsdma_read(hsdma
, HSDMA_REG_TX_CNT
),
213 mtk_hsdma_read(hsdma
, HSDMA_REG_TX_CTX
),
214 mtk_hsdma_read(hsdma
, HSDMA_REG_TX_DTX
),
215 mtk_hsdma_read(hsdma
, HSDMA_REG_RX_BASE
),
216 mtk_hsdma_read(hsdma
, HSDMA_REG_RX_CNT
),
217 mtk_hsdma_read(hsdma
, HSDMA_REG_RX_CRX
),
218 mtk_hsdma_read(hsdma
, HSDMA_REG_RX_DRX
));
220 dev_dbg(hsdma
->ddev
.dev
,
221 "info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
222 mtk_hsdma_read(hsdma
, HSDMA_REG_INFO
),
223 mtk_hsdma_read(hsdma
, HSDMA_REG_GLO_CFG
),
224 mtk_hsdma_read(hsdma
, HSDMA_REG_DELAY_INT
),
225 mtk_hsdma_read(hsdma
, HSDMA_REG_INT_STATUS
),
226 mtk_hsdma_read(hsdma
, HSDMA_REG_INT_MASK
));
229 static void hsdma_dump_desc(struct mtk_hsdam_engine
*hsdma
,
230 struct mtk_hsdma_chan
*chan
)
232 struct hsdma_desc
*tx_desc
;
233 struct hsdma_desc
*rx_desc
;
236 dev_dbg(hsdma
->ddev
.dev
, "tx idx: %d, rx idx: %d\n",
237 chan
->tx_idx
, chan
->rx_idx
);
239 for (i
= 0; i
< HSDMA_DESCS_NUM
; i
++) {
240 tx_desc
= &chan
->tx_ring
[i
];
241 rx_desc
= &chan
->rx_ring
[i
];
243 dev_dbg(hsdma
->ddev
.dev
,
244 "%d tx addr0: %08x, flags %08x, tx addr1: %08x, rx addr0 %08x, flags %08x\n",
245 i
, tx_desc
->addr0
, tx_desc
->flags
,
246 tx_desc
->addr1
, rx_desc
->addr0
, rx_desc
->flags
);
250 static void mtk_hsdma_reset(struct mtk_hsdam_engine
*hsdma
,
251 struct mtk_hsdma_chan
*chan
)
256 mtk_hsdma_write(hsdma
, HSDMA_REG_GLO_CFG
, 0);
259 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_MASK
, 0);
261 /* init desc value */
262 for (i
= 0; i
< HSDMA_DESCS_NUM
; i
++) {
263 chan
->tx_ring
[i
].addr0
= 0;
264 chan
->tx_ring
[i
].flags
= HSDMA_DESC_LS0
| HSDMA_DESC_DONE
;
266 for (i
= 0; i
< HSDMA_DESCS_NUM
; i
++) {
267 chan
->rx_ring
[i
].addr0
= 0;
268 chan
->rx_ring
[i
].flags
= 0;
272 mtk_hsdma_reset_chan(hsdma
, chan
);
275 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_MASK
, HSDMA_INT_RX_Q0
);
278 mtk_hsdma_write(hsdma
, HSDMA_REG_GLO_CFG
, HSDMA_GLO_DEFAULT
);
281 static int mtk_hsdma_terminate_all(struct dma_chan
*c
)
283 struct mtk_hsdma_chan
*chan
= to_mtk_hsdma_chan(c
);
284 struct mtk_hsdam_engine
*hsdma
= mtk_hsdma_chan_get_dev(chan
);
285 unsigned long timeout
;
288 spin_lock_bh(&chan
->vchan
.lock
);
290 clear_bit(chan
->id
, &hsdma
->chan_issued
);
291 vchan_get_all_descriptors(&chan
->vchan
, &head
);
292 spin_unlock_bh(&chan
->vchan
.lock
);
294 vchan_dma_desc_free_list(&chan
->vchan
, &head
);
296 /* wait dma transfer complete */
297 timeout
= jiffies
+ msecs_to_jiffies(2000);
298 while (mtk_hsdma_read(hsdma
, HSDMA_REG_GLO_CFG
) &
299 (HSDMA_GLO_RX_BUSY
| HSDMA_GLO_TX_BUSY
)) {
300 if (time_after_eq(jiffies
, timeout
)) {
301 hsdma_dump_desc(hsdma
, chan
);
302 mtk_hsdma_reset(hsdma
, chan
);
303 dev_err(hsdma
->ddev
.dev
, "timeout, reset it\n");
312 static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine
*hsdma
,
313 struct mtk_hsdma_chan
*chan
)
317 struct hsdma_desc
*tx_desc
, *rx_desc
;
318 struct mtk_hsdma_sg
*sg
;
322 sg
= &chan
->desc
->sg
[0];
324 chan
->desc
->num_sgs
= DIV_ROUND_UP(len
, HSDMA_MAX_PLEN
);
328 for (i
= 0; i
< chan
->desc
->num_sgs
; i
++) {
329 tx_desc
= &chan
->tx_ring
[chan
->tx_idx
];
331 if (len
> HSDMA_MAX_PLEN
)
332 tlen
= HSDMA_MAX_PLEN
;
337 tx_desc
->addr1
= src
;
338 tx_desc
->flags
|= HSDMA_DESC_PLEN1(tlen
);
340 tx_desc
->addr0
= src
;
341 tx_desc
->flags
= HSDMA_DESC_PLEN0(tlen
);
344 chan
->tx_idx
= HSDMA_NEXT_DESC(chan
->tx_idx
);
351 tx_desc
->flags
|= HSDMA_DESC_LS0
;
353 tx_desc
->flags
|= HSDMA_DESC_LS1
;
356 rx_idx
= HSDMA_NEXT_DESC(chan
->rx_idx
);
359 for (i
= 0; i
< chan
->desc
->num_sgs
; i
++) {
360 rx_desc
= &chan
->rx_ring
[rx_idx
];
361 if (len
> HSDMA_MAX_PLEN
)
362 tlen
= HSDMA_MAX_PLEN
;
366 rx_desc
->addr0
= dst
;
367 rx_desc
->flags
= HSDMA_DESC_PLEN0(tlen
);
373 rx_idx
= HSDMA_NEXT_DESC(rx_idx
);
376 /* make sure desc and index all up to date */
378 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_CTX
, chan
->tx_idx
);
383 static int gdma_next_desc(struct mtk_hsdma_chan
*chan
)
385 struct virt_dma_desc
*vdesc
;
387 vdesc
= vchan_next_desc(&chan
->vchan
);
392 chan
->desc
= to_mtk_hsdma_desc(vdesc
);
398 static void mtk_hsdma_chan_done(struct mtk_hsdam_engine
*hsdma
,
399 struct mtk_hsdma_chan
*chan
)
401 struct mtk_hsdma_desc
*desc
;
405 spin_lock_bh(&chan
->vchan
.lock
);
408 if (chan
->next_sg
== desc
->num_sgs
) {
409 list_del(&desc
->vdesc
.node
);
410 vchan_cookie_complete(&desc
->vdesc
);
411 chan_issued
= gdma_next_desc(chan
);
414 dev_dbg(hsdma
->ddev
.dev
, "no desc to complete\n");
418 set_bit(chan
->id
, &hsdma
->chan_issued
);
419 spin_unlock_bh(&chan
->vchan
.lock
);
422 static irqreturn_t
mtk_hsdma_irq(int irq
, void *devid
)
424 struct mtk_hsdam_engine
*hsdma
= devid
;
427 status
= mtk_hsdma_read(hsdma
, HSDMA_REG_INT_STATUS
);
428 if (unlikely(!status
))
431 if (likely(status
& HSDMA_INT_RX_Q0
))
432 tasklet_schedule(&hsdma
->task
);
434 dev_dbg(hsdma
->ddev
.dev
, "unhandle irq status %08x\n", status
);
435 /* clean intr bits */
436 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_STATUS
, status
);
441 static void mtk_hsdma_issue_pending(struct dma_chan
*c
)
443 struct mtk_hsdma_chan
*chan
= to_mtk_hsdma_chan(c
);
444 struct mtk_hsdam_engine
*hsdma
= mtk_hsdma_chan_get_dev(chan
);
446 spin_lock_bh(&chan
->vchan
.lock
);
447 if (vchan_issue_pending(&chan
->vchan
) && !chan
->desc
) {
448 if (gdma_next_desc(chan
)) {
449 set_bit(chan
->id
, &hsdma
->chan_issued
);
450 tasklet_schedule(&hsdma
->task
);
452 dev_dbg(hsdma
->ddev
.dev
, "no desc to issue\n");
455 spin_unlock_bh(&chan
->vchan
.lock
);
458 static struct dma_async_tx_descriptor
*mtk_hsdma_prep_dma_memcpy(
459 struct dma_chan
*c
, dma_addr_t dest
, dma_addr_t src
,
460 size_t len
, unsigned long flags
)
462 struct mtk_hsdma_chan
*chan
= to_mtk_hsdma_chan(c
);
463 struct mtk_hsdma_desc
*desc
;
468 desc
= kzalloc(sizeof(*desc
), GFP_ATOMIC
);
470 dev_err(c
->device
->dev
, "alloc memcpy decs error\n");
474 desc
->sg
[0].src_addr
= src
;
475 desc
->sg
[0].dst_addr
= dest
;
476 desc
->sg
[0].len
= len
;
478 return vchan_tx_prep(&chan
->vchan
, &desc
->vdesc
, flags
);
481 static enum dma_status
mtk_hsdma_tx_status(struct dma_chan
*c
,
483 struct dma_tx_state
*state
)
485 return dma_cookie_status(c
, cookie
, state
);
488 static void mtk_hsdma_free_chan_resources(struct dma_chan
*c
)
490 vchan_free_chan_resources(to_virt_chan(c
));
493 static void mtk_hsdma_desc_free(struct virt_dma_desc
*vdesc
)
495 kfree(container_of(vdesc
, struct mtk_hsdma_desc
, vdesc
));
498 static void mtk_hsdma_tx(struct mtk_hsdam_engine
*hsdma
)
500 struct mtk_hsdma_chan
*chan
;
502 if (test_and_clear_bit(0, &hsdma
->chan_issued
)) {
503 chan
= &hsdma
->chan
[0];
505 mtk_hsdma_start_transfer(hsdma
, chan
);
507 dev_dbg(hsdma
->ddev
.dev
, "chan 0 no desc to issue\n");
511 static void mtk_hsdma_rx(struct mtk_hsdam_engine
*hsdma
)
513 struct mtk_hsdma_chan
*chan
;
514 int next_idx
, drx_idx
, cnt
;
516 chan
= &hsdma
->chan
[0];
517 next_idx
= HSDMA_NEXT_DESC(chan
->rx_idx
);
518 drx_idx
= mtk_hsdma_read(hsdma
, HSDMA_REG_RX_DRX
);
520 cnt
= (drx_idx
- next_idx
) & HSDMA_DESCS_MASK
;
524 chan
->next_sg
+= cnt
;
525 chan
->rx_idx
= (chan
->rx_idx
+ cnt
) & HSDMA_DESCS_MASK
;
529 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_CRX
, chan
->rx_idx
);
531 mtk_hsdma_chan_done(hsdma
, chan
);
534 static void mtk_hsdma_tasklet(struct tasklet_struct
*t
)
536 struct mtk_hsdam_engine
*hsdma
= from_tasklet(hsdma
, t
, task
);
542 static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine
*hsdma
,
543 struct mtk_hsdma_chan
*chan
)
547 chan
->tx_ring
= dma_alloc_coherent(hsdma
->ddev
.dev
,
548 2 * HSDMA_DESCS_NUM
*
549 sizeof(*chan
->tx_ring
),
550 &chan
->desc_addr
, GFP_ATOMIC
| __GFP_ZERO
);
554 chan
->rx_ring
= &chan
->tx_ring
[HSDMA_DESCS_NUM
];
556 /* init tx ring value */
557 for (i
= 0; i
< HSDMA_DESCS_NUM
; i
++)
558 chan
->tx_ring
[i
].flags
= HSDMA_DESC_LS0
| HSDMA_DESC_DONE
;
565 static void mtk_hsdam_free_desc(struct mtk_hsdam_engine
*hsdma
,
566 struct mtk_hsdma_chan
*chan
)
569 dma_free_coherent(hsdma
->ddev
.dev
,
570 2 * HSDMA_DESCS_NUM
* sizeof(*chan
->tx_ring
),
571 chan
->tx_ring
, chan
->desc_addr
);
572 chan
->tx_ring
= NULL
;
573 chan
->rx_ring
= NULL
;
577 static int mtk_hsdma_init(struct mtk_hsdam_engine
*hsdma
)
579 struct mtk_hsdma_chan
*chan
;
584 chan
= &hsdma
->chan
[0];
585 ret
= mtk_hsdam_alloc_desc(hsdma
, chan
);
590 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_BASE
, chan
->desc_addr
);
591 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_CNT
, HSDMA_DESCS_NUM
);
593 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_BASE
, chan
->desc_addr
+
594 (sizeof(struct hsdma_desc
) * HSDMA_DESCS_NUM
));
595 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_CNT
, HSDMA_DESCS_NUM
);
597 mtk_hsdma_reset_chan(hsdma
, chan
);
600 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_MASK
, HSDMA_INT_RX_Q0
);
603 mtk_hsdma_write(hsdma
, HSDMA_REG_GLO_CFG
, HSDMA_GLO_DEFAULT
);
606 reg
= mtk_hsdma_read(hsdma
, HSDMA_REG_INFO
);
607 dev_info(hsdma
->ddev
.dev
, "rx: %d, tx: %d\n",
608 (reg
>> HSDMA_INFO_RX_SHIFT
) & HSDMA_INFO_RX_MASK
,
609 (reg
>> HSDMA_INFO_TX_SHIFT
) & HSDMA_INFO_TX_MASK
);
611 hsdma_dump_reg(hsdma
);
616 static void mtk_hsdma_uninit(struct mtk_hsdam_engine
*hsdma
)
618 struct mtk_hsdma_chan
*chan
;
621 mtk_hsdma_write(hsdma
, HSDMA_REG_GLO_CFG
, 0);
624 mtk_hsdma_write(hsdma
, HSDMA_REG_INT_MASK
, 0);
627 chan
= &hsdma
->chan
[0];
628 mtk_hsdam_free_desc(hsdma
, chan
);
631 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_BASE
, 0);
632 mtk_hsdma_write(hsdma
, HSDMA_REG_TX_CNT
, 0);
634 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_BASE
, 0);
635 mtk_hsdma_write(hsdma
, HSDMA_REG_RX_CNT
, 0);
637 mtk_hsdma_reset_chan(hsdma
, chan
);
640 static const struct of_device_id mtk_hsdma_of_match
[] = {
641 { .compatible
= "mediatek,mt7621-hsdma" },
645 static int mtk_hsdma_probe(struct platform_device
*pdev
)
647 const struct of_device_id
*match
;
648 struct mtk_hsdma_chan
*chan
;
649 struct mtk_hsdam_engine
*hsdma
;
650 struct dma_device
*dd
;
655 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
659 match
= of_match_device(mtk_hsdma_of_match
, &pdev
->dev
);
663 hsdma
= devm_kzalloc(&pdev
->dev
, sizeof(*hsdma
), GFP_KERNEL
);
667 base
= devm_platform_ioremap_resource(pdev
, 0);
669 return PTR_ERR(base
);
670 hsdma
->base
= base
+ HSDMA_BASE_OFFSET
;
671 tasklet_setup(&hsdma
->task
, mtk_hsdma_tasklet
);
673 irq
= platform_get_irq(pdev
, 0);
676 ret
= devm_request_irq(&pdev
->dev
, irq
, mtk_hsdma_irq
,
677 0, dev_name(&pdev
->dev
), hsdma
);
679 dev_err(&pdev
->dev
, "failed to request irq\n");
683 device_reset(&pdev
->dev
);
686 dma_cap_set(DMA_MEMCPY
, dd
->cap_mask
);
687 dd
->copy_align
= HSDMA_ALIGN_SIZE
;
688 dd
->device_free_chan_resources
= mtk_hsdma_free_chan_resources
;
689 dd
->device_prep_dma_memcpy
= mtk_hsdma_prep_dma_memcpy
;
690 dd
->device_terminate_all
= mtk_hsdma_terminate_all
;
691 dd
->device_tx_status
= mtk_hsdma_tx_status
;
692 dd
->device_issue_pending
= mtk_hsdma_issue_pending
;
693 dd
->dev
= &pdev
->dev
;
694 dd
->dev
->dma_parms
= &hsdma
->dma_parms
;
695 dma_set_max_seg_size(dd
->dev
, HSDMA_MAX_PLEN
);
696 INIT_LIST_HEAD(&dd
->channels
);
698 chan
= &hsdma
->chan
[0];
700 chan
->vchan
.desc_free
= mtk_hsdma_desc_free
;
701 vchan_init(&chan
->vchan
, dd
);
704 ret
= mtk_hsdma_init(hsdma
);
706 dev_err(&pdev
->dev
, "failed to alloc ring descs\n");
710 ret
= dma_async_device_register(dd
);
712 dev_err(&pdev
->dev
, "failed to register dma device\n");
713 goto err_uninit_hsdma
;
716 ret
= of_dma_controller_register(pdev
->dev
.of_node
,
717 of_dma_xlate_by_chan_id
, hsdma
);
719 dev_err(&pdev
->dev
, "failed to register of dma controller\n");
723 platform_set_drvdata(pdev
, hsdma
);
728 dma_async_device_unregister(dd
);
730 mtk_hsdma_uninit(hsdma
);
734 static int mtk_hsdma_remove(struct platform_device
*pdev
)
736 struct mtk_hsdam_engine
*hsdma
= platform_get_drvdata(pdev
);
738 mtk_hsdma_uninit(hsdma
);
740 of_dma_controller_free(pdev
->dev
.of_node
);
741 dma_async_device_unregister(&hsdma
->ddev
);
746 static struct platform_driver mtk_hsdma_driver
= {
747 .probe
= mtk_hsdma_probe
,
748 .remove
= mtk_hsdma_remove
,
750 .name
= KBUILD_MODNAME
,
751 .of_match_table
= mtk_hsdma_of_match
,
754 module_platform_driver(mtk_hsdma_driver
);
756 MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
757 MODULE_DESCRIPTION("MTK HSDMA driver");
758 MODULE_LICENSE("GPL v2");