target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch
1 From 659603c5f6cbc3d39922d4374df25ae4627d0e88 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 12:12:20 +0800
4 Subject: [PATCH] dma: support layerscape
5
6 This is an integrated patch for Layerscape DMA support.
7
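Illustrative only, not part of the patch: a kernel client could drive the
engine through the generic dmaengine API roughly as below, where dst, src
and len stand for caller-provided DMA addresses and length, and error
handling is elided:

    dma_cap_mask_t mask;
    struct dma_chan *chan;
    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask);
    chan = dma_request_channel(mask, NULL, NULL);
    tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
    cookie = dmaengine_submit(tx);
    dma_async_issue_pending(chan); /* ends up in dpaa2_qdma_issue_pending() */
    while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
            cpu_relax();
    dma_release_channel(chan);
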
8 Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
9 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
10 ---
11 drivers/dma/Kconfig | 14 +
12 drivers/dma/Makefile | 2 +
13 drivers/dma/dpaa2-qdma/Kconfig | 8 +
14 drivers/dma/dpaa2-qdma/Makefile | 8 +
15 drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 +++++++++++++++++++++++++
16 drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 262 +++++++
17 drivers/dma/dpaa2-qdma/dpdmai.c | 454 ++++++++++++
18 drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++++++
19 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++++
20 drivers/dma/fsl-qdma.c | 1201 +++++++++++++++++++++++++++++++
21 10 files changed, 3678 insertions(+)
22 create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
23 create mode 100644 drivers/dma/dpaa2-qdma/Makefile
24 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
25 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
26 create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
27 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
28 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
29 create mode 100644 drivers/dma/fsl-qdma.c
30
31 diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
32 index 141aefbe..e5b0fb0b 100644
33 --- a/drivers/dma/Kconfig
34 +++ b/drivers/dma/Kconfig
35 @@ -192,6 +192,20 @@ config FSL_EDMA
36 multiplexing capability for DMA request sources(slot).
37 This module can be found on Freescale Vybrid and LS-1 SoCs.
38
39 +config FSL_QDMA
40 + tristate "Freescale qDMA engine support"
41 + select DMA_ENGINE
42 + select DMA_VIRTUAL_CHANNELS
43 + select DMA_ENGINE_RAID
44 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
45 + help
46 + Support the Freescale qDMA engine with command queue and legacy mode.
47 + Channel virtualization is supported through enqueuing of DMA jobs to,
48 + or dequeuing DMA jobs from, different work queues.
49 + This module can be found on Freescale LS SoCs.
50 +
51 +source drivers/dma/dpaa2-qdma/Kconfig
52 +
53 config FSL_RAID
54 tristate "Freescale RAID engine Support"
55 depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
56 diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
57 index e4dc9cac..1226cbb4 100644
58 --- a/drivers/dma/Makefile
59 +++ b/drivers/dma/Makefile
60 @@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
61 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
62 obj-$(CONFIG_FSL_DMA) += fsldma.o
63 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
64 +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
65 +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
66 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
67 obj-$(CONFIG_HSU_DMA) += hsu/
68 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
69 diff --git a/drivers/dma/dpaa2-qdma/Kconfig b/drivers/dma/dpaa2-qdma/Kconfig
70 new file mode 100644
71 index 00000000..084e34bf
72 --- /dev/null
73 +++ b/drivers/dma/dpaa2-qdma/Kconfig
74 @@ -0,0 +1,8 @@
75 +menuconfig FSL_DPAA2_QDMA
76 + tristate "NXP DPAA2 QDMA"
77 + depends on FSL_MC_BUS && FSL_MC_DPIO
78 + select DMA_ENGINE
79 + select DMA_VIRTUAL_CHANNELS
80 + ---help---
81 + NXP Data Path Acceleration Architecture 2 QDMA driver,
82 + using the NXP MC bus driver.
83 diff --git a/drivers/dma/dpaa2-qdma/Makefile b/drivers/dma/dpaa2-qdma/Makefile
84 new file mode 100644
85 index 00000000..ba599ac6
86 --- /dev/null
87 +++ b/drivers/dma/dpaa2-qdma/Makefile
88 @@ -0,0 +1,8 @@
89 +#
90 +# Makefile for the NXP DPAA2 qDMA driver
91 +#
92 +ccflags-y += -DVERSION=\"\"
93 +
94 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
95 +
96 +fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
97 diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
98 new file mode 100644
99 index 00000000..ad6b03f7
100 --- /dev/null
101 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
102 @@ -0,0 +1,986 @@
103 +/*
104 + * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
105 + *
106 + * Copyright 2015-2017 NXP Semiconductor, Inc.
107 + * Author: Changming Huang <jerry.huang@nxp.com>
108 + *
109 + * Driver for the NXP QDMA engine with QMan mode.
110 + * Channel virtualization is supported through enqueuing of DMA jobs to,
111 + * or dequeuing DMA jobs from, different work queues via the QMan portal.
112 + * This module can be found on NXP LS2 SoCs.
113 + *
114 + * This program is free software; you can redistribute it and/or modify it
115 + * under the terms of the GNU General Public License as published by the
116 + * Free Software Foundation; either version 2 of the License, or (at your
117 + * option) any later version.
118 + */
119 +
120 +#include <linux/init.h>
121 +#include <linux/module.h>
122 +#include <linux/interrupt.h>
123 +#include <linux/clk.h>
124 +#include <linux/dma-mapping.h>
125 +#include <linux/dmapool.h>
126 +#include <linux/slab.h>
127 +#include <linux/spinlock.h>
128 +#include <linux/of.h>
129 +#include <linux/of_device.h>
130 +#include <linux/of_address.h>
131 +#include <linux/of_irq.h>
132 +#include <linux/of_dma.h>
133 +#include <linux/types.h>
134 +#include <linux/delay.h>
135 +#include <linux/iommu.h>
136 +
137 +#include "../virt-dma.h"
138 +
139 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
140 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
141 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
142 +#include "fsl_dpdmai_cmd.h"
143 +#include "fsl_dpdmai.h"
144 +#include "dpaa2-qdma.h"
145 +
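+/*
+ * Cleared at probe time when an IOMMU domain is attached to the device;
+ * while true, frame descriptors and frame lists are built with BMT
+ * (bypass memory translation) set.
+ */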
146 +static bool smmu_disable = true;
147 +
148 +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
149 +{
150 + return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
151 +}
152 +
153 +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
154 +{
155 + return container_of(vd, struct dpaa2_qdma_comp, vdesc);
156 +}
157 +
158 +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
159 +{
160 + return 0;
161 +}
162 +
163 +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
164 +{
165 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
166 + unsigned long flags;
167 + LIST_HEAD(head);
168 +
169 + spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
170 + vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
171 + spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
172 +
173 + vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
174 +}
175 +
176 +/*
177 + * Request a command descriptor for enqueue.
178 + */
179 +static struct dpaa2_qdma_comp *
180 +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
181 +{
182 + struct dpaa2_qdma_comp *comp_temp = NULL;
183 + unsigned long flags;
184 +
185 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
186 + if (list_empty(&dpaa2_chan->comp_free)) {
187 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
188 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
189 + if (!comp_temp)
190 + goto err;
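+        /*
+         * One pool block carries the FD, three frame-list entries and the
+         * source/destination descriptor pair back to back (see FD_POOL_SIZE);
+         * carve it into the three regions below.
+         */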
191 + comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
192 + GFP_NOWAIT, &comp_temp->fd_bus_addr);
193 + if (!comp_temp->fd_virt_addr)
194 + goto err;
195 +
196 + comp_temp->fl_virt_addr =
197 + (void *)((struct dpaa2_fd *)
198 + comp_temp->fd_virt_addr + 1);
199 + comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
200 + sizeof(struct dpaa2_fd);
201 + comp_temp->desc_virt_addr =
202 + (void *)((struct dpaa2_frame_list *)
203 + comp_temp->fl_virt_addr + 3);
204 + comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
205 + sizeof(struct dpaa2_frame_list) * 3;
206 +
207 + comp_temp->qchan = dpaa2_chan;
208 + comp_temp->sg_blk_num = 0;
209 + INIT_LIST_HEAD(&comp_temp->sg_src_head);
210 + INIT_LIST_HEAD(&comp_temp->sg_dst_head);
211 + return comp_temp;
212 + }
213 + comp_temp = list_first_entry(&dpaa2_chan->comp_free,
214 + struct dpaa2_qdma_comp, list);
215 + list_del(&comp_temp->list);
216 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
217 +
218 + comp_temp->qchan = dpaa2_chan;
219 +err:
220 + return comp_temp;
221 +}
222 +
223 +static void dpaa2_qdma_populate_fd(uint32_t format,
224 + struct dpaa2_qdma_comp *dpaa2_comp)
225 +{
226 + struct dpaa2_fd *fd;
227 +
228 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
229 + memset(fd, 0, sizeof(struct dpaa2_fd));
230 +
231 + /* fd populated */
232 + fd->simple.addr = dpaa2_comp->fl_bus_addr;
233 + /* Bypass memory translation, Frame list format, short length disable */
234 +	/* BMT must be disabled when fsl-mc uses IOVA addresses */
235 + if (smmu_disable)
236 + fd->simple.bpid = QMAN_FD_BMT_ENABLE;
237 + fd->simple.format_offset = QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE;
238 +
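+	/* QDMA_SER_CTX: request a per-frame completion notification via
+	 * FQD_CTX[fqid] when the job finishes.
+	 */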
239 + fd->simple.frc = format | QDMA_SER_CTX;
240 +}
241 +
242 +/* first frame list for descriptor buffer */
243 +static void dpaa2_qdma_populate_first_framel(
244 + struct dpaa2_frame_list *f_list,
245 + struct dpaa2_qdma_comp *dpaa2_comp)
246 +{
247 + struct dpaa2_qdma_sd_d *sdd;
248 +
249 + sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
250 + memset(sdd, 0, 2 * (sizeof(*sdd)));
251 + /* source and destination descriptor */
252 + sdd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT; /* source descriptor CMD */
253 + sdd++;
254 + sdd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT; /* dest descriptor CMD */
255 +
256 + memset(f_list, 0, sizeof(struct dpaa2_frame_list));
257 + /* first frame list to source descriptor */
258 + f_list->addr_lo = dpaa2_comp->desc_bus_addr;
259 + f_list->addr_hi = (dpaa2_comp->desc_bus_addr >> 32);
260 + f_list->data_len.data_len_sl0 = 0x20; /* source/destination desc len */
261 + f_list->fmt = QDMA_FL_FMT_SBF; /* single buffer frame */
262 + if (smmu_disable)
263 + f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
264 + f_list->sl = QDMA_FL_SL_LONG; /* long length */
265 + f_list->f = 0; /* not the last frame list */
266 +}
267 +
268 +/* source and destination frame list */
269 +static void dpaa2_qdma_populate_frames(struct dpaa2_frame_list *f_list,
270 + dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
271 +{
272 + /* source frame list to source buffer */
273 + memset(f_list, 0, sizeof(struct dpaa2_frame_list));
274 + f_list->addr_lo = src;
275 + f_list->addr_hi = (src >> 32);
276 + f_list->data_len.data_len_sl0 = len;
277 + f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
278 + if (smmu_disable)
279 + f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
280 + f_list->sl = QDMA_FL_SL_LONG; /* long length */
281 + f_list->f = 0; /* not the last frame list */
282 +
283 + f_list++;
284 + /* destination frame list to destination buffer */
285 + memset(f_list, 0, sizeof(struct dpaa2_frame_list));
286 + f_list->addr_lo = dst;
287 + f_list->addr_hi = (dst >> 32);
288 + f_list->data_len.data_len_sl0 = len;
289 + f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
290 + if (smmu_disable)
291 + f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
292 + f_list->sl = QDMA_FL_SL_LONG; /* long length */
293 + f_list->f = QDMA_FL_F; /* Final bit: 1, for last frame list */
294 +}
295 +
296 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
297 + struct dma_chan *chan, dma_addr_t dst,
298 + dma_addr_t src, size_t len, unsigned long flags)
299 +{
300 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
301 + struct dpaa2_qdma_comp *dpaa2_comp;
302 + struct dpaa2_frame_list *f_list;
303 + uint32_t format;
304 +
305 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
306 +
307 +#ifdef LONG_FORMAT
308 + format = QDMA_FD_LONG_FORMAT;
309 +#else
310 + format = QDMA_FD_SHORT_FORMAT;
311 +#endif
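+	/* LONG_FORMAT is defined in dpaa2-qdma.h, so the long FD format with
+	 * a leading descriptor frame list is what actually gets built here.
+	 */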
312 + /* populate Frame descriptor */
313 + dpaa2_qdma_populate_fd(format, dpaa2_comp);
314 +
315 + f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
316 +
317 +#ifdef LONG_FORMAT
318 +	/* first frame list for the descriptor buffer (long format) */
319 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
320 +
321 + f_list++;
322 +#endif
323 +
324 + dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
325 +
326 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
327 +}
328 +
329 +static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
330 + struct dpaa2_qdma_comp *dpaa2_comp,
331 + struct dpaa2_qdma_chan *dpaa2_chan)
332 +{
333 + struct qdma_sg_blk *sg_blk = NULL;
334 + dma_addr_t phy_sgb;
335 + unsigned long flags;
336 +
337 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
338 + if (list_empty(&dpaa2_chan->sgb_free)) {
339 + sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
340 + dpaa2_chan->sg_blk_pool,
341 + GFP_NOWAIT, &phy_sgb);
342 + if (!sg_blk) {
343 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
344 + return sg_blk;
345 + }
346 + sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
347 + sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
348 + } else {
349 + sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
350 + struct qdma_sg_blk, list);
351 + list_del(&sg_blk->list);
352 + }
353 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
354 +
355 + return sg_blk;
356 +}
357 +
358 +static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
359 + struct dpaa2_qdma_chan *dpaa2_chan,
360 + struct dpaa2_qdma_comp *dpaa2_comp,
361 + struct scatterlist *dst_sg, u32 dst_nents,
362 + struct scatterlist *src_sg, u32 src_nents)
363 +{
364 + struct dpaa2_qdma_sg *src_sge;
365 + struct dpaa2_qdma_sg *dst_sge;
366 + struct qdma_sg_blk *sg_blk;
367 + struct qdma_sg_blk *sg_blk_dst;
368 + dma_addr_t src;
369 + dma_addr_t dst;
370 + uint32_t num;
371 + uint32_t blocks;
372 + uint32_t len = 0;
373 + uint32_t total_len = 0;
374 + int i, j = 0;
375 +
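+	/* Each SG block holds NUM_SG_PER_BLK entries, but the last entry of a
+	 * block is reserved for an SGT-extension entry that chains to the next
+	 * block, so only NUM_SG_PER_BLK - 1 entries per block describe data.
+	 */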
376 + num = min(dst_nents, src_nents);
377 + blocks = num / (NUM_SG_PER_BLK - 1);
378 + if (num % (NUM_SG_PER_BLK - 1))
379 + blocks += 1;
380 + if (dpaa2_comp->sg_blk_num < blocks) {
381 + len = blocks - dpaa2_comp->sg_blk_num;
382 + for (i = 0; i < len; i++) {
383 + /* source sg blocks */
384 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
385 + if (!sg_blk)
386 + return 0;
387 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
388 + /* destination sg blocks */
389 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
390 + if (!sg_blk)
391 + return 0;
392 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
393 + }
394 + } else {
395 + len = dpaa2_comp->sg_blk_num - blocks;
396 + for (i = 0; i < len; i++) {
397 + spin_lock(&dpaa2_chan->queue_lock);
398 + /* handle source sg blocks */
399 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
400 + struct qdma_sg_blk, list);
401 + list_del(&sg_blk->list);
402 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
403 + /* handle destination sg blocks */
404 + sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
405 + struct qdma_sg_blk, list);
406 + list_del(&sg_blk->list);
407 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
408 + spin_unlock(&dpaa2_chan->queue_lock);
409 + }
410 + }
411 + dpaa2_comp->sg_blk_num = blocks;
412 +
413 + /* get the first source sg phy address */
414 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
415 + struct qdma_sg_blk, list);
416 + dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
417 +	/* get the first destination sg phy address */
418 + sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
419 + struct qdma_sg_blk, list);
420 + dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
421 +
422 + for (i = 0; i < blocks; i++) {
423 + src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
424 + dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
425 +
426 + for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
427 + len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
428 +			if (len == 0)
429 + goto fetch;
430 + total_len += len;
431 + src = sg_dma_address(src_sg);
432 + dst = sg_dma_address(dst_sg);
433 +
434 + /* source SG */
435 + src_sge->addr_lo = src;
436 + src_sge->addr_hi = (src >> 32);
437 + src_sge->data_len.data_len_sl0 = len;
438 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
439 + src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
440 + /* destination SG */
441 + dst_sge->addr_lo = dst;
442 + dst_sge->addr_hi = (dst >> 32);
443 + dst_sge->data_len.data_len_sl0 = len;
444 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
445 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
446 +fetch:
447 + num--;
448 +			if (num == 0) {
449 + src_sge->ctrl.f = QDMA_SG_F;
450 + dst_sge->ctrl.f = QDMA_SG_F;
451 + goto end;
452 + }
453 + dst_sg = sg_next(dst_sg);
454 + src_sg = sg_next(src_sg);
455 + src_sge++;
456 + dst_sge++;
457 + if (j == (NUM_SG_PER_BLK - 2)) {
458 + /* for next blocks, extension */
459 + sg_blk = list_next_entry(sg_blk, list);
460 + sg_blk_dst = list_next_entry(sg_blk_dst, list);
461 + src_sge->addr_lo = sg_blk->blk_bus_addr;
462 + src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
463 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
464 + src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
465 + dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
466 + dst_sge->addr_hi =
467 + sg_blk_dst->blk_bus_addr >> 32;
468 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
469 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
470 + }
471 + }
472 + }
473 +
474 +end:
475 + return total_len;
476 +}
477 +
478 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_sg(
479 + struct dma_chan *chan,
480 + struct scatterlist *dst_sg, u32 dst_nents,
481 + struct scatterlist *src_sg, u32 src_nents,
482 + unsigned long flags)
483 +{
484 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
485 + struct dpaa2_qdma_comp *dpaa2_comp;
486 + struct dpaa2_frame_list *f_list;
487 + struct device *dev = dpaa2_chan->qdma->priv->dev;
488 + uint32_t total_len = 0;
489 +
490 + /* basic sanity checks */
491 + if (dst_nents == 0 || src_nents == 0)
492 + return NULL;
493 +
494 + if (dst_sg == NULL || src_sg == NULL)
495 + return NULL;
496 +
497 + /* get the descriptors required */
498 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
499 +
500 + /* populate Frame descriptor */
501 + dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
502 +
503 + /* prepare Scatter gather entry for source and destination */
504 + total_len = dpaa2_qdma_populate_sg(dev, dpaa2_chan,
505 + dpaa2_comp, dst_sg, dst_nents, src_sg, src_nents);
506 +
507 + f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
508 + /* first frame list for descriptor buffer */
509 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
510 + f_list++;
511 + /* prepare Scatter gather entry for source and destination */
512 + /* populate source and destination frame list table */
513 + dpaa2_qdma_populate_frames(f_list, dpaa2_comp->sge_dst_bus_addr,
514 + dpaa2_comp->sge_src_bus_addr,
515 + total_len, QDMA_FL_FMT_SGE);
516 +
517 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
518 +}
519 +
520 +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
521 + dma_cookie_t cookie, struct dma_tx_state *txstate)
522 +{
523 + return dma_cookie_status(chan, cookie, txstate);
524 +}
525 +
526 +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
527 +{
528 +}
529 +
530 +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
531 +{
532 + struct dpaa2_qdma_comp *dpaa2_comp;
533 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
534 + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
535 + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
536 + struct virt_dma_desc *vdesc;
537 + struct dpaa2_fd *fd;
538 + int err;
539 + unsigned long flags;
540 +
541 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
542 + spin_lock(&dpaa2_chan->vchan.lock);
543 + if (vchan_issue_pending(&dpaa2_chan->vchan)) {
544 + vdesc = vchan_next_desc(&dpaa2_chan->vchan);
545 + if (!vdesc)
546 + goto err_enqueue;
547 + dpaa2_comp = to_fsl_qdma_comp(vdesc);
548 +
549 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
550 +
551 + list_del(&vdesc->node);
552 + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
553 +
554 +		/* TODO: priority is hard-coded to zero */
555 + err = dpaa2_io_service_enqueue_fq(NULL,
556 + priv->tx_queue_attr[0].fqid, fd);
557 + if (err) {
558 + list_del(&dpaa2_comp->list);
559 + list_add_tail(&dpaa2_comp->list,
560 + &dpaa2_chan->comp_free);
561 + }
562 +
563 + }
564 +err_enqueue:
565 + spin_unlock(&dpaa2_chan->vchan.lock);
566 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
567 +}
568 +
569 +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
570 +{
571 + struct device *dev = &ls_dev->dev;
572 + struct dpaa2_qdma_priv *priv;
573 + struct dpaa2_qdma_priv_per_prio *ppriv;
574 + uint8_t prio_def = DPDMAI_PRIO_NUM;
575 + int err;
576 + int i;
577 +
578 + priv = dev_get_drvdata(dev);
579 +
580 + priv->dev = dev;
581 + priv->dpqdma_id = ls_dev->obj_desc.id;
582 +
583 +	/* Get a handle for the DPDMAI this interface is associated with */
584 + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
585 + if (err) {
586 + dev_err(dev, "dpdmai_open() failed\n");
587 + return err;
588 + }
589 + dev_info(dev, "Opened dpdmai object successfully\n");
590 +
591 + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
592 + &priv->dpdmai_attr);
593 + if (err) {
594 + dev_err(dev, "dpdmai_get_attributes() failed\n");
595 + return err;
596 + }
597 +
598 + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
599 + dev_err(dev, "DPDMAI major version mismatch\n"
600 + "Found %u.%u, supported version is %u.%u\n",
601 + priv->dpdmai_attr.version.major,
602 + priv->dpdmai_attr.version.minor,
603 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
604 + }
605 +
606 + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
607 + dev_err(dev, "DPDMAI minor version mismatch\n"
608 + "Found %u.%u, supported version is %u.%u\n",
609 + priv->dpdmai_attr.version.major,
610 + priv->dpdmai_attr.version.minor,
611 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
612 + }
613 +
614 + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
615 + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
616 + if (!ppriv) {
617 +		dev_err(dev, "kcalloc for ppriv failed\n");
618 +		return -ENOMEM;
619 + }
620 + priv->ppriv = ppriv;
621 +
622 + for (i = 0; i < priv->num_pairs; i++) {
623 + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
624 + i, &priv->rx_queue_attr[i]);
625 + if (err) {
626 + dev_err(dev, "dpdmai_get_rx_queue() failed\n");
627 + return err;
628 + }
629 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
630 +
631 + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
632 + i, &priv->tx_queue_attr[i]);
633 + if (err) {
634 + dev_err(dev, "dpdmai_get_tx_queue() failed\n");
635 + return err;
636 + }
637 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
638 + ppriv->prio = i;
639 + ppriv->priv = priv;
640 + ppriv++;
641 + }
642 +
643 + return 0;
644 +}
645 +
646 +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
647 +{
648 + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
649 + struct dpaa2_qdma_priv_per_prio, nctx);
650 + struct dpaa2_qdma_priv *priv = ppriv->priv;
651 + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
652 + struct dpaa2_qdma_chan *qchan;
653 + const struct dpaa2_fd *fd;
654 + const struct dpaa2_fd *fd_eq;
655 + struct dpaa2_dq *dq;
656 + int err;
657 + int is_last = 0;
658 + uint8_t status;
659 + int i;
660 + int found;
661 + uint32_t n_chans = priv->dpaa2_qdma->n_chans;
662 +
663 + do {
664 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
665 + ppriv->store);
666 + } while (err);
667 +
668 + while (!is_last) {
669 + do {
670 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
671 + } while (!is_last && !dq);
672 + if (!dq) {
673 + dev_err(priv->dev, "FQID returned no valid frames!\n");
674 + continue;
675 + }
676 +
677 + /* obtain FD and process the error */
678 + fd = dpaa2_dq_fd(dq);
679 + status = fd->simple.ctrl & 0xff;
680 + if (status)
681 + dev_err(priv->dev, "FD error occurred\n");
682 + found = 0;
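+		/* The dequeued FD carries no channel context, so walk every
+		 * channel's comp_used list and match the response to its
+		 * completion descriptor by frame-list address.
+		 */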
683 + for (i = 0; i < n_chans; i++) {
684 + qchan = &priv->dpaa2_qdma->chans[i];
685 + spin_lock(&qchan->queue_lock);
686 + if (list_empty(&qchan->comp_used)) {
687 + spin_unlock(&qchan->queue_lock);
688 + continue;
689 + }
690 + list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
691 + &qchan->comp_used, list) {
692 + fd_eq = (struct dpaa2_fd *)
693 + dpaa2_comp->fd_virt_addr;
694 +
695 + if (fd_eq->simple.addr ==
696 + fd->simple.addr) {
697 +
698 + list_del(&dpaa2_comp->list);
699 + list_add_tail(&dpaa2_comp->list,
700 + &qchan->comp_free);
701 +
702 + spin_lock(&qchan->vchan.lock);
703 + vchan_cookie_complete(
704 + &dpaa2_comp->vdesc);
705 + spin_unlock(&qchan->vchan.lock);
706 + found = 1;
707 + break;
708 + }
709 + }
710 + spin_unlock(&qchan->queue_lock);
711 + if (found)
712 + break;
713 + }
714 + }
715 +
716 + dpaa2_io_service_rearm(NULL, ctx);
717 +}
718 +
719 +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
720 +{
721 + int err, i, num;
722 + struct device *dev = priv->dev;
723 + struct dpaa2_qdma_priv_per_prio *ppriv;
724 +
725 + num = priv->num_pairs;
726 + ppriv = priv->ppriv;
727 + for (i = 0; i < num; i++) {
728 + ppriv->nctx.is_cdan = 0;
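+		/* FQDAN notifications for this priority are steered to a fixed CPU */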
729 + ppriv->nctx.desired_cpu = 1;
730 + ppriv->nctx.id = ppriv->rsp_fqid;
731 + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
732 + err = dpaa2_io_service_register(NULL, &ppriv->nctx);
733 + if (err) {
734 + dev_err(dev, "Notification register failed\n");
735 + goto err_service;
736 + }
737 +
738 + ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
739 + dev);
740 + if (!ppriv->store) {
741 + dev_err(dev, "dpaa2_io_store_create() failed\n");
742 + goto err_store;
743 + }
744 +
745 + ppriv++;
746 + }
747 + return 0;
748 +
749 +err_store:
750 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
751 +err_service:
752 + ppriv--;
753 + while (ppriv >= priv->ppriv) {
754 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
755 + dpaa2_io_store_destroy(ppriv->store);
756 + ppriv--;
757 + }
758 + return -1;
759 +}
760 +
761 +static void __cold dpaa2_dpdmai_store_free(struct dpaa2_qdma_priv *priv)
762 +{
763 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
764 + int i;
765 +
766 + for (i = 0; i < priv->num_pairs; i++) {
767 + dpaa2_io_store_destroy(ppriv->store);
768 + ppriv++;
769 + }
770 +}
771 +
772 +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
773 +{
774 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
775 + int i;
776 +
777 + for (i = 0; i < priv->num_pairs; i++) {
778 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
779 + ppriv++;
780 + }
781 +}
782 +
783 +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
784 +{
785 + int err;
786 + struct dpdmai_rx_queue_cfg rx_queue_cfg;
787 + struct device *dev = priv->dev;
788 + struct dpaa2_qdma_priv_per_prio *ppriv;
789 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
790 + int i, num;
791 +
792 + num = priv->num_pairs;
793 + ppriv = priv->ppriv;
794 + for (i = 0; i < num; i++) {
795 + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
796 + DPDMAI_QUEUE_OPT_DEST;
797 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
798 + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
799 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
800 + rx_queue_cfg.dest_cfg.priority = ppriv->prio;
801 + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
802 + rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
803 + if (err) {
804 + dev_err(dev, "dpdmai_set_rx_queue() failed\n");
805 + return err;
806 + }
807 +
808 + ppriv++;
809 + }
810 +
811 + return 0;
812 +}
813 +
814 +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
815 +{
816 + int err = 0;
817 + struct device *dev = priv->dev;
818 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
819 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
820 + int i;
821 +
822 + for (i = 0; i < priv->num_pairs; i++) {
823 + ppriv->nctx.qman64 = 0;
824 + ppriv->nctx.dpio_id = 0;
825 + ppriv++;
826 + }
827 +
828 + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
829 + if (err)
830 + dev_err(dev, "dpdmai_reset() failed\n");
831 +
832 + return err;
833 +}
834 +
835 +static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
836 + struct list_head *head)
837 +{
838 + struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
839 + /* free the QDMA SG pool block */
840 + list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
841 + sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
842 + sgb_tmp->blk_virt_addr - 1);
843 + sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
844 + - sizeof(*sgb_tmp);
845 + dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
846 + sgb_tmp->blk_bus_addr);
847 + }
848 +
849 +}
850 +
851 +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
852 + struct list_head *head)
853 +{
854 + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
855 + /* free the QDMA comp resource */
856 + list_for_each_entry_safe(comp_tmp, _comp_tmp,
857 + head, list) {
858 + dma_pool_free(qchan->fd_pool,
859 + comp_tmp->fd_virt_addr,
860 + comp_tmp->fd_bus_addr);
861 + /* free the SG source block on comp */
862 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
863 + /* free the SG destination block on comp */
864 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
865 + list_del(&comp_tmp->list);
866 + kfree(comp_tmp);
867 + }
868 +
869 +}
870 +
871 +static void __cold dpaa2_dpdmai_free_channels(
872 + struct dpaa2_qdma_engine *dpaa2_qdma)
873 +{
874 + struct dpaa2_qdma_chan *qchan;
875 + int num, i;
876 +
877 + num = dpaa2_qdma->n_chans;
878 + for (i = 0; i < num; i++) {
879 + qchan = &dpaa2_qdma->chans[i];
880 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
881 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
882 + dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
883 + dma_pool_destroy(qchan->fd_pool);
884 + dma_pool_destroy(qchan->sg_blk_pool);
885 + }
886 +}
887 +
888 +static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
889 +{
890 + struct dpaa2_qdma_chan *dpaa2_chan;
891 + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
892 + int i;
893 +
894 + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
895 + for (i = 0; i < dpaa2_qdma->n_chans; i++) {
896 + dpaa2_chan = &dpaa2_qdma->chans[i];
897 + dpaa2_chan->qdma = dpaa2_qdma;
898 + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
899 + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
900 +
901 + dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
902 + dev, FD_POOL_SIZE, 32, 0);
903 + if (!dpaa2_chan->fd_pool)
904 +			return -ENOMEM;
905 + dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
906 + dev, SG_POOL_SIZE, 32, 0);
907 + if (!dpaa2_chan->sg_blk_pool)
908 +			return -ENOMEM;
909 +
910 + spin_lock_init(&dpaa2_chan->queue_lock);
911 + INIT_LIST_HEAD(&dpaa2_chan->comp_used);
912 + INIT_LIST_HEAD(&dpaa2_chan->comp_free);
913 + INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
914 + }
915 + return 0;
916 +}
917 +
918 +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
919 +{
920 + struct dpaa2_qdma_priv *priv;
921 + struct device *dev = &dpdmai_dev->dev;
922 + struct dpaa2_qdma_engine *dpaa2_qdma;
923 + int err;
924 +
925 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
926 + if (!priv)
927 + return -ENOMEM;
928 + dev_set_drvdata(dev, priv);
929 + priv->dpdmai_dev = dpdmai_dev;
930 +
931 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
932 + if (priv->iommu_domain)
933 + smmu_disable = false;
934 +
935 + /* obtain a MC portal */
936 + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
937 + if (err) {
938 + dev_err(dev, "MC portal allocation failed\n");
939 + goto err_mcportal;
940 + }
941 +
942 + /* DPDMAI initialization */
943 + err = dpaa2_qdma_setup(dpdmai_dev);
944 + if (err) {
945 +		dev_err(dev, "dpaa2_qdma_setup() failed\n");
946 + goto err_dpdmai_setup;
947 + }
948 +
949 + /* DPIO */
950 + err = dpaa2_qdma_dpio_setup(priv);
951 + if (err) {
952 +		dev_err(dev, "dpaa2_qdma_dpio_setup() failed\n");
953 + goto err_dpio_setup;
954 + }
955 +
956 + /* DPDMAI binding to DPIO */
957 + err = dpaa2_dpdmai_bind(priv);
958 + if (err) {
959 + dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
960 + goto err_bind;
961 + }
962 +
963 + /* DPDMAI enable */
964 + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
965 + if (err) {
966 +		dev_err(dev, "dpdmai_enable() failed\n");
967 + goto err_enable;
968 + }
969 +
970 + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
971 + if (!dpaa2_qdma) {
972 + err = -ENOMEM;
973 + goto err_eng;
974 + }
975 +
976 + priv->dpaa2_qdma = dpaa2_qdma;
977 + dpaa2_qdma->priv = priv;
978 +
979 + dpaa2_qdma->n_chans = NUM_CH;
980 +
981 + err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
982 + if (err) {
983 +		dev_err(dev, "QDMA alloc channels failed\n");
984 + goto err_reg;
985 + }
986 +
987 + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
988 + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
989 + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
990 + dma_cap_set(DMA_SG, dpaa2_qdma->dma_dev.cap_mask);
991 +
992 + dpaa2_qdma->dma_dev.dev = dev;
993 + dpaa2_qdma->dma_dev.device_alloc_chan_resources
994 + = dpaa2_qdma_alloc_chan_resources;
995 + dpaa2_qdma->dma_dev.device_free_chan_resources
996 + = dpaa2_qdma_free_chan_resources;
997 + dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
998 + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
999 + dpaa2_qdma->dma_dev.device_prep_dma_sg = dpaa2_qdma_prep_sg;
1000 + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
1001 +
1002 + err = dma_async_device_register(&dpaa2_qdma->dma_dev);
1003 + if (err) {
1004 + dev_err(dev, "Can't register NXP QDMA engine.\n");
1005 + goto err_reg;
1006 + }
1007 +
1008 + return 0;
1009 +
1010 +err_reg:
1011 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1012 + kfree(dpaa2_qdma);
1013 +err_eng:
1014 + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1015 +err_enable:
1016 + dpaa2_dpdmai_dpio_unbind(priv);
1017 +err_bind:
1018 +	dpaa2_dpdmai_store_free(priv);
1019 + dpaa2_dpdmai_dpio_free(priv);
1020 +err_dpio_setup:
1021 + dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
1022 +err_dpdmai_setup:
1023 + fsl_mc_portal_free(priv->mc_io);
1024 +err_mcportal:
1025 + kfree(priv->ppriv);
1026 + kfree(priv);
1027 + dev_set_drvdata(dev, NULL);
1028 + return err;
1029 +}
1030 +
1031 +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
1032 +{
1033 + struct device *dev;
1034 + struct dpaa2_qdma_priv *priv;
1035 + struct dpaa2_qdma_engine *dpaa2_qdma;
1036 +
1037 + dev = &ls_dev->dev;
1038 + priv = dev_get_drvdata(dev);
1039 + dpaa2_qdma = priv->dpaa2_qdma;
1040 +
1041 + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
1042 + dpaa2_dpdmai_dpio_unbind(priv);
1043 +	dpaa2_dpdmai_store_free(priv);
1044 + dpaa2_dpdmai_dpio_free(priv);
1045 + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
1046 + fsl_mc_portal_free(priv->mc_io);
1047 + dev_set_drvdata(dev, NULL);
1048 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1049 +
1050 + dma_async_device_unregister(&dpaa2_qdma->dma_dev);
1051 + kfree(priv);
1052 + kfree(dpaa2_qdma);
1053 +
1054 + return 0;
1055 +}
1056 +
1057 +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
1058 + {
1059 + .vendor = FSL_MC_VENDOR_FREESCALE,
1060 + .obj_type = "dpdmai",
1061 + },
1062 + { .vendor = 0x0 }
1063 +};
1064 +
1065 +static struct fsl_mc_driver dpaa2_qdma_driver = {
1066 + .driver = {
1067 + .name = "dpaa2-qdma",
1068 + .owner = THIS_MODULE,
1069 + },
1070 + .probe = dpaa2_qdma_probe,
1071 + .remove = dpaa2_qdma_remove,
1072 + .match_id_table = dpaa2_qdma_id_table
1073 +};
1074 +
1075 +static int __init dpaa2_qdma_driver_init(void)
1076 +{
1077 +	return fsl_mc_driver_register(&dpaa2_qdma_driver);
1078 +}
1079 +late_initcall(dpaa2_qdma_driver_init);
1080 +
1081 +static void __exit fsl_qdma_exit(void)
1082 +{
1083 +	fsl_mc_driver_unregister(&dpaa2_qdma_driver);
1084 +}
1085 +module_exit(fsl_qdma_exit);
1086 +
1087 +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
1088 +MODULE_LICENSE("Dual BSD/GPL");
1089 diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1090 new file mode 100644
1091 index 00000000..71a00db8
1092 --- /dev/null
1093 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1094 @@ -0,0 +1,262 @@
1095 +/* Copyright 2015 NXP Semiconductor Inc.
1096 + *
1097 + * Redistribution and use in source and binary forms, with or without
1098 + * modification, are permitted provided that the following conditions are met:
1099 + * * Redistributions of source code must retain the above copyright
1100 + * notice, this list of conditions and the following disclaimer.
1101 + * * Redistributions in binary form must reproduce the above copyright
1102 + * notice, this list of conditions and the following disclaimer in the
1103 + * documentation and/or other materials provided with the distribution.
1104 + * * Neither the name of NXP Semiconductor nor the
1105 + * names of its contributors may be used to endorse or promote products
1106 + * derived from this software without specific prior written permission.
1107 + *
1108 + *
1109 + * ALTERNATIVELY, this software may be distributed under the terms of the
1110 + * GNU General Public License ("GPL") as published by the Free Software
1111 + * Foundation, either version 2 of that License or (at your option) any
1112 + * later version.
1113 + *
1114 + * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
1115 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1116 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1117 + * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
1118 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1119 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1120 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1121 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1122 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1123 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1124 + */
1125 +
1126 +#ifndef __DPAA2_QDMA_H
1127 +#define __DPAA2_QDMA_H
1128 +
1129 +#define LONG_FORMAT 1
1130 +
1131 +#define DPAA2_QDMA_STORE_SIZE 16
1132 +#define NUM_CH 8
1133 +#define NUM_SG_PER_BLK 16
1134 +
1135 +#define QDMA_DMR_OFFSET 0x0
1136 +#define QDMA_DQ_EN (0 << 30)
1137 +#define QDMA_DQ_DIS (1 << 30)
1138 +
1139 +#define QDMA_DSR_M_OFFSET 0x10004
1140 +
1141 +struct dpaa2_qdma_sd_d {
1142 + uint32_t rsv:32;
1143 + union {
1144 + struct {
1145 +			uint32_t ssd:12; /* source stride distance */
1146 +			uint32_t sss:12; /* source stride size */
1147 + uint32_t rsv1:8;
1148 + } sdf;
1149 + struct {
1150 + uint32_t dsd:12; /* Destination stride distance */
1151 + uint32_t dss:12; /* Destination stride size */
1152 + uint32_t rsv2:8;
1153 + } ddf;
1154 + } df;
1155 + uint32_t rbpcmd; /* Route-by-port command */
1156 + uint32_t cmd;
1157 +} __attribute__((__packed__));
1158 +/* Source descriptor command read transaction type for RBP=0:
1159 + coherent copy of cacheable memory */
1160 +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
1161 +/* Destination descriptor command write transaction type for RBP=0:
1162 + coherent copy of cacheable memory */
1163 +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
1164 +
1165 +#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
1166 +#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
1167 +#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
1168 +#define QDMA_SG_SL_SHORT 0x1 /* short length */
1169 +#define QDMA_SG_SL_LONG	0x0 /* long length */
1170 +#define QDMA_SG_F 0x1 /* last sg entry */
1171 +struct dpaa2_qdma_sg {
1172 + uint32_t addr_lo; /* address 0:31 */
1173 + uint32_t addr_hi:17; /* address 32:48 */
1174 + uint32_t rsv:15;
1175 + union {
1176 + uint32_t data_len_sl0; /* SL=0, the long format */
1177 + struct {
1178 + uint32_t len:17; /* SL=1, the short format */
1179 + uint32_t reserve:3;
1180 + uint32_t sf:1;
1181 + uint32_t sr:1;
1182 + uint32_t size:10; /* buff size */
1183 + } data_len_sl1;
1184 + } data_len; /* AVAIL_LENGTH */
1185 + struct {
1186 + uint32_t bpid:14;
1187 + uint32_t ivp:1;
1188 + uint32_t mbt:1;
1189 + uint32_t offset:12;
1190 + uint32_t fmt:2;
1191 + uint32_t sl:1;
1192 + uint32_t f:1;
1193 + } ctrl;
1194 +} __attribute__((__packed__));
1195 +
1196 +#define QMAN_FD_FMT_ENABLE (1 << 12) /* frame list table enable */
1197 +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
1198 +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
1199 +#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
1200 +#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
1201 +
1202 +#define QDMA_SB_FRAME (0 << 28) /* single frame */
1203 +#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
1204 +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
1205 +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
1206 +
1207 +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
1208 +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
1209 +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
1210 +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
1211 +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
1212 +#define QDMA_SER_BOTH (3 << 8)	/* source and dest notification */
1213 +#define QDMA_FD_SPF_ENABLE (1 << 30) /* source prefetch enable */
1214 +
1215 +#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
1216 +#define QMAN_FD_VA_DISABLE (0 << 14)/* Address used is a real address */
1217 +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
1218 +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
1219 +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
1220 +
1221 +#define QDMA_FL_FMT_SBF 0x0 /* Single buffer frame */
1222 +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
1223 +#define QDMA_FL_BMT_ENABLE 0x1 /* enable bypass memory translation */
1224 +#define QDMA_FL_BMT_DISABLE 0x0 /* enable bypass memory translation */
1225 +#define QDMA_FL_SL_LONG 0x0 /* long length */
1226 +#define QDMA_FL_SL_SHORT 0x1 /* short length */
1227 +#define QDMA_FL_F 0x1 /* last frame list bit */
1228 +/* Description of the frame list table structure */
1229 +struct dpaa2_frame_list {
1230 + uint32_t addr_lo; /* lower 32 bits of address */
1231 + uint32_t addr_hi:17; /* upper 17 bits of address */
1232 + uint32_t resrvd:15;
1233 + union {
1234 +		uint32_t data_len_sl0;	/* If SL=0, the data length field is 32 bits */
1235 + struct {
1236 +			uint32_t data_len:18;	/* If SL=1, the length field is 18 bits */
1237 + uint32_t resrvd:2;
1238 + uint32_t mem:12; /* Valid only when SL=1 */
1239 + } data_len_sl1;
1240 + } data_len;
1241 + /* word 4 */
1242 + uint32_t bpid:14; /* Frame buffer pool ID */
1243 + uint32_t ivp:1; /* Invalid Pool ID. */
1244 + uint32_t bmt:1; /* Bypass Memory Translation */
1245 + uint32_t offset:12; /* Frame offset */
1246 + uint32_t fmt:2; /* Frame Format */
1247 + uint32_t sl:1; /* Short Length */
1248 + uint32_t f:1; /* Final bit */
1249 +
1250 + uint32_t frc; /* Frame Context */
1251 + /* word 6 */
1252 + uint32_t err:8; /* Frame errors */
1253 + uint32_t resrvd0:8;
1254 + uint32_t asal:4; /* accelerator-specific annotation length */
1255 + uint32_t resrvd1:1;
1256 + uint32_t ptv2:1;
1257 + uint32_t ptv1:1;
1258 + uint32_t pta:1; /* pass-through annotation */
1259 + uint32_t resrvd2:8;
1260 +
1261 +	uint32_t flc_lo;	/* lower 32 bits of flow context */
1262 +	uint32_t flc_hi;	/* upper 32 bits of flow context */
1263 +} __attribute__((__packed__));
1264 +
1265 +struct dpaa2_qdma_chan {
1266 + struct virt_dma_chan vchan;
1267 + struct virt_dma_desc vdesc;
1268 + enum dma_status status;
1269 + struct dpaa2_qdma_engine *qdma;
1270 +
1271 + struct mutex dpaa2_queue_mutex;
1272 + spinlock_t queue_lock;
1273 + struct dma_pool *fd_pool;
1274 + struct dma_pool *sg_blk_pool;
1275 +
1276 + struct list_head comp_used;
1277 + struct list_head comp_free;
1278 +
1279 + struct list_head sgb_free;
1280 +};
1281 +
1282 +struct qdma_sg_blk {
1283 + dma_addr_t blk_bus_addr;
1284 + void *blk_virt_addr;
1285 + struct list_head list;
1286 +};
1287 +
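+/*
+ * Per-job completion context: owns one FD/frame-list/descriptor pool block
+ * plus any SG blocks, and sits on the channel's comp_free or comp_used list.
+ */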
1288 +struct dpaa2_qdma_comp {
1289 + dma_addr_t fd_bus_addr;
1290 + dma_addr_t fl_bus_addr;
1291 + dma_addr_t desc_bus_addr;
1292 + dma_addr_t sge_src_bus_addr;
1293 + dma_addr_t sge_dst_bus_addr;
1294 + void *fd_virt_addr;
1295 + void *fl_virt_addr;
1296 + void *desc_virt_addr;
1297 + void *sg_src_virt_addr;
1298 + void *sg_dst_virt_addr;
1299 + struct qdma_sg_blk *sg_blk;
1300 + uint32_t sg_blk_num;
1301 + struct list_head sg_src_head;
1302 + struct list_head sg_dst_head;
1303 + struct dpaa2_qdma_chan *qchan;
1304 + struct virt_dma_desc vdesc;
1305 + struct list_head list;
1306 +};
1307 +
1308 +struct dpaa2_qdma_engine {
1309 + struct dma_device dma_dev;
1310 + u32 n_chans;
1311 + struct dpaa2_qdma_chan chans[NUM_CH];
1312 +
1313 + struct dpaa2_qdma_priv *priv;
1314 +};
1315 +
1316 +/*
1317 + * dpaa2_qdma_priv - driver private data
1318 + */
1319 +struct dpaa2_qdma_priv {
1320 + int dpqdma_id;
1321 +
1322 + struct iommu_domain *iommu_domain;
1323 + struct dpdmai_attr dpdmai_attr;
1324 + struct device *dev;
1325 + struct fsl_mc_io *mc_io;
1326 + struct fsl_mc_device *dpdmai_dev;
1327 +
1328 + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
1329 + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
1330 +
1331 + uint8_t num_pairs;
1332 +
1333 + struct dpaa2_qdma_engine *dpaa2_qdma;
1334 + struct dpaa2_qdma_priv_per_prio *ppriv;
1335 +};
1336 +
1337 +struct dpaa2_qdma_priv_per_prio {
1338 + int req_fqid;
1339 + int rsp_fqid;
1340 + int prio;
1341 +
1342 + struct dpaa2_io_store *store;
1343 + struct dpaa2_io_notification_ctx nctx;
1344 +
1345 + struct dpaa2_qdma_priv *priv;
1346 +};
1347 +
1348 +/* FD pool size: one FD + 3 frame-list entries + 2 source/destination descriptors */
1349 +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
1350 + sizeof(struct dpaa2_frame_list) * 3 + \
1351 + sizeof(struct dpaa2_qdma_sd_d) * 2)
1352 +
1353 +/* qdma_sg_blk + 16 SGs */
1354 +#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
1355 + sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
1356 +#endif /* __DPAA2_QDMA_H */
1357 diff --git a/drivers/dma/dpaa2-qdma/dpdmai.c b/drivers/dma/dpaa2-qdma/dpdmai.c
1358 new file mode 100644
1359 index 00000000..ad13fc1e
1360 --- /dev/null
1361 +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
1362 @@ -0,0 +1,454 @@
1363 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
1364 + *
1365 + * Redistribution and use in source and binary forms, with or without
1366 + * modification, are permitted provided that the following conditions are met:
1367 + * * Redistributions of source code must retain the above copyright
1368 + * notice, this list of conditions and the following disclaimer.
1369 + * * Redistributions in binary form must reproduce the above copyright
1370 + * notice, this list of conditions and the following disclaimer in the
1371 + * documentation and/or other materials provided with the distribution.
1372 + * * Neither the name of the above-listed copyright holders nor the
1373 + * names of any contributors may be used to endorse or promote products
1374 + * derived from this software without specific prior written permission.
1375 + *
1376 + *
1377 + * ALTERNATIVELY, this software may be distributed under the terms of the
1378 + * GNU General Public License ("GPL") as published by the Free Software
1379 + * Foundation, either version 2 of that License or (at your option) any
1380 + * later version.
1381 + *
1382 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1383 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1384 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1385 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
1386 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1387 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1388 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1389 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1390 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1391 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1392 + * POSSIBILITY OF SUCH DAMAGE.
1393 + */
1394 +#include <linux/types.h>
1395 +#include <linux/io.h>
1396 +#include "fsl_dpdmai.h"
1397 +#include "fsl_dpdmai_cmd.h"
1398 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
1399 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
1400 +
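+/*
+ * Every DPDMAI command below follows the same pattern: encode the command
+ * header (plus any parameters) into a struct mc_command, send it through
+ * the MC portal, and for queries decode the response in place.
+ */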
1401 +int dpdmai_open(struct fsl_mc_io *mc_io,
1402 + uint32_t cmd_flags,
1403 + int dpdmai_id,
1404 + uint16_t *token)
1405 +{
1406 + struct mc_command cmd = { 0 };
1407 + int err;
1408 +
1409 + /* prepare command */
1410 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
1411 + cmd_flags,
1412 + 0);
1413 + DPDMAI_CMD_OPEN(cmd, dpdmai_id);
1414 +
1415 + /* send command to mc*/
1416 + err = mc_send_command(mc_io, &cmd);
1417 + if (err)
1418 + return err;
1419 +
1420 + /* retrieve response parameters */
1421 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
1422 +
1423 + return 0;
1424 +}
1425 +
1426 +int dpdmai_close(struct fsl_mc_io *mc_io,
1427 + uint32_t cmd_flags,
1428 + uint16_t token)
1429 +{
1430 + struct mc_command cmd = { 0 };
1431 +
1432 + /* prepare command */
1433 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
1434 + cmd_flags, token);
1435 +
1436 + /* send command to mc*/
1437 + return mc_send_command(mc_io, &cmd);
1438 +}
1439 +
1440 +int dpdmai_create(struct fsl_mc_io *mc_io,
1441 + uint32_t cmd_flags,
1442 + const struct dpdmai_cfg *cfg,
1443 + uint16_t *token)
1444 +{
1445 + struct mc_command cmd = { 0 };
1446 + int err;
1447 +
1448 + /* prepare command */
1449 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
1450 + cmd_flags,
1451 + 0);
1452 + DPDMAI_CMD_CREATE(cmd, cfg);
1453 +
1454 + /* send command to mc*/
1455 + err = mc_send_command(mc_io, &cmd);
1456 + if (err)
1457 + return err;
1458 +
1459 + /* retrieve response parameters */
1460 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
1461 +
1462 + return 0;
1463 +}
1464 +
1465 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
1466 + uint32_t cmd_flags,
1467 + uint16_t token)
1468 +{
1469 + struct mc_command cmd = { 0 };
1470 +
1471 + /* prepare command */
1472 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
1473 + cmd_flags,
1474 + token);
1475 +
1476 + /* send command to mc*/
1477 + return mc_send_command(mc_io, &cmd);
1478 +}
1479 +
1480 +int dpdmai_enable(struct fsl_mc_io *mc_io,
1481 + uint32_t cmd_flags,
1482 + uint16_t token)
1483 +{
1484 + struct mc_command cmd = { 0 };
1485 +
1486 + /* prepare command */
1487 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
1488 + cmd_flags,
1489 + token);
1490 +
1491 + /* send command to mc*/
1492 + return mc_send_command(mc_io, &cmd);
1493 +}
1494 +
1495 +int dpdmai_disable(struct fsl_mc_io *mc_io,
1496 + uint32_t cmd_flags,
1497 + uint16_t token)
1498 +{
1499 + struct mc_command cmd = { 0 };
1500 +
1501 + /* prepare command */
1502 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
1503 + cmd_flags,
1504 + token);
1505 +
1506 + /* send command to mc*/
1507 + return mc_send_command(mc_io, &cmd);
1508 +}
1509 +
1510 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
1511 + uint32_t cmd_flags,
1512 + uint16_t token,
1513 + int *en)
1514 +{
1515 + struct mc_command cmd = { 0 };
1516 + int err;
1517 + /* prepare command */
1518 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
1519 + cmd_flags,
1520 + token);
1521 +
1522 + /* send command to mc*/
1523 + err = mc_send_command(mc_io, &cmd);
1524 + if (err)
1525 + return err;
1526 +
1527 + /* retrieve response parameters */
1528 + DPDMAI_RSP_IS_ENABLED(cmd, *en);
1529 +
1530 + return 0;
1531 +}
1532 +
1533 +int dpdmai_reset(struct fsl_mc_io *mc_io,
1534 + uint32_t cmd_flags,
1535 + uint16_t token)
1536 +{
1537 + struct mc_command cmd = { 0 };
1538 +
1539 + /* prepare command */
1540 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
1541 + cmd_flags,
1542 + token);
1543 +
1544 + /* send command to mc*/
1545 + return mc_send_command(mc_io, &cmd);
1546 +}
1547 +
1548 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
1549 + uint32_t cmd_flags,
1550 + uint16_t token,
1551 + uint8_t irq_index,
1552 + int *type,
1553 + struct dpdmai_irq_cfg *irq_cfg)
1554 +{
1555 + struct mc_command cmd = { 0 };
1556 + int err;
1557 +
1558 + /* prepare command */
1559 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
1560 + cmd_flags,
1561 + token);
1562 + DPDMAI_CMD_GET_IRQ(cmd, irq_index);
1563 +
1564 + /* send command to mc*/
1565 + err = mc_send_command(mc_io, &cmd);
1566 + if (err)
1567 + return err;
1568 +
1569 + /* retrieve response parameters */
1570 + DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
1571 +
1572 + return 0;
1573 +}
1574 +
1575 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
1576 + uint32_t cmd_flags,
1577 + uint16_t token,
1578 + uint8_t irq_index,
1579 + struct dpdmai_irq_cfg *irq_cfg)
1580 +{
1581 + struct mc_command cmd = { 0 };
1582 +
1583 + /* prepare command */
1584 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
1585 + cmd_flags,
1586 + token);
1587 + DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
1588 +
1589 + /* send command to mc*/
1590 + return mc_send_command(mc_io, &cmd);
1591 +}
1592 +
1593 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
1594 + uint32_t cmd_flags,
1595 + uint16_t token,
1596 + uint8_t irq_index,
1597 + uint8_t *en)
1598 +{
1599 + struct mc_command cmd = { 0 };
1600 + int err;
1601 +
1602 + /* prepare command */
1603 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
1604 + cmd_flags,
1605 + token);
1606 + DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
1607 +
1608 + /* send command to mc*/
1609 + err = mc_send_command(mc_io, &cmd);
1610 + if (err)
1611 + return err;
1612 +
1613 + /* retrieve response parameters */
1614 + DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
1615 +
1616 + return 0;
1617 +}
1618 +
1619 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
1620 + uint32_t cmd_flags,
1621 + uint16_t token,
1622 + uint8_t irq_index,
1623 + uint8_t en)
1624 +{
1625 + struct mc_command cmd = { 0 };
1626 +
1627 + /* prepare command */
1628 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
1629 + cmd_flags,
1630 + token);
1631 + DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
1632 +
1633 + /* send command to mc*/
1634 + return mc_send_command(mc_io, &cmd);
1635 +}
1636 +
1637 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
1638 + uint32_t cmd_flags,
1639 + uint16_t token,
1640 + uint8_t irq_index,
1641 + uint32_t *mask)
1642 +{
1643 + struct mc_command cmd = { 0 };
1644 + int err;
1645 +
1646 + /* prepare command */
1647 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
1648 + cmd_flags,
1649 + token);
1650 + DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
1651 +
1652 + /* send command to mc*/
1653 + err = mc_send_command(mc_io, &cmd);
1654 + if (err)
1655 + return err;
1656 +
1657 + /* retrieve response parameters */
1658 + DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
1659 +
1660 + return 0;
1661 +}
1662 +
1663 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
1664 + uint32_t cmd_flags,
1665 + uint16_t token,
1666 + uint8_t irq_index,
1667 + uint32_t mask)
1668 +{
1669 + struct mc_command cmd = { 0 };
1670 +
1671 + /* prepare command */
1672 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
1673 + cmd_flags,
1674 + token);
1675 + DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
1676 +
1677 + /* send command to mc*/
1678 + return mc_send_command(mc_io, &cmd);
1679 +}
1680 +
1681 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
1682 + uint32_t cmd_flags,
1683 + uint16_t token,
1684 + uint8_t irq_index,
1685 + uint32_t *status)
1686 +{
1687 + struct mc_command cmd = { 0 };
1688 + int err;
1689 +
1690 + /* prepare command */
1691 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
1692 + cmd_flags,
1693 + token);
1694 + DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
1695 +
1696 + /* send command to mc*/
1697 + err = mc_send_command(mc_io, &cmd);
1698 + if (err)
1699 + return err;
1700 +
1701 + /* retrieve response parameters */
1702 + DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
1703 +
1704 + return 0;
1705 +}
1706 +
1707 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
1708 + uint32_t cmd_flags,
1709 + uint16_t token,
1710 + uint8_t irq_index,
1711 + uint32_t status)
1712 +{
1713 + struct mc_command cmd = { 0 };
1714 +
1715 + /* prepare command */
1716 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
1717 + cmd_flags,
1718 + token);
1719 + DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
1720 +
1721 + /* send command to mc */
1722 + return mc_send_command(mc_io, &cmd);
1723 +}
1724 +
1725 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
1726 + uint32_t cmd_flags,
1727 + uint16_t token,
1728 + struct dpdmai_attr *attr)
1729 +{
1730 + struct mc_command cmd = { 0 };
1731 + int err;
1732 +
1733 + /* prepare command */
1734 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
1735 + cmd_flags,
1736 + token);
1737 +
1738 + /* send command to mc */
1739 + err = mc_send_command(mc_io, &cmd);
1740 + if (err)
1741 + return err;
1742 +
1743 + /* retrieve response parameters */
1744 + DPDMAI_RSP_GET_ATTR(cmd, attr);
1745 +
1746 + return 0;
1747 +}
1748 +
1749 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
1750 + uint32_t cmd_flags,
1751 + uint16_t token,
1752 + uint8_t priority,
1753 + const struct dpdmai_rx_queue_cfg *cfg)
1754 +{
1755 + struct mc_command cmd = { 0 };
1756 +
1757 + /* prepare command */
1758 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
1759 + cmd_flags,
1760 + token);
1761 + DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
1762 +
1763 + /* send command to mc */
1764 + return mc_send_command(mc_io, &cmd);
1765 +}
1766 +
1767 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
1768 + uint32_t cmd_flags,
1769 + uint16_t token,
1770 + uint8_t priority, struct dpdmai_rx_queue_attr *attr)
1771 +{
1772 + struct mc_command cmd = { 0 };
1773 + int err;
1774 +
1775 + /* prepare command */
1776 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
1777 + cmd_flags,
1778 + token);
1779 + DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
1780 +
1781 + /* send command to mc */
1782 + err = mc_send_command(mc_io, &cmd);
1783 + if (err)
1784 + return err;
1785 +
1786 + /* retrieve response parameters */
1787 + DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
1788 +
1789 + return 0;
1790 +}
1791 +
1792 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
1793 + uint32_t cmd_flags,
1794 + uint16_t token,
1795 + uint8_t priority,
1796 + struct dpdmai_tx_queue_attr *attr)
1797 +{
1798 + struct mc_command cmd = { 0 };
1799 + int err;
1800 +
1801 + /* prepare command */
1802 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
1803 + cmd_flags,
1804 + token);
1805 + DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
1806 +
1807 + /* send command to mc */
1808 + err = mc_send_command(mc_io, &cmd);
1809 + if (err)
1810 + return err;
1811 +
1812 + /* retrieve response parameters */
1813 + DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
1814 +
1815 + return 0;
1816 +}
1817 diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
1818 new file mode 100644
1819 index 00000000..e931ce16
1820 --- /dev/null
1821 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
1822 @@ -0,0 +1,521 @@
1823 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
1824 + *
1825 + * Redistribution and use in source and binary forms, with or without
1826 + * modification, are permitted provided that the following conditions are met:
1827 + * * Redistributions of source code must retain the above copyright
1828 + * notice, this list of conditions and the following disclaimer.
1829 + * * Redistributions in binary form must reproduce the above copyright
1830 + * notice, this list of conditions and the following disclaimer in the
1831 + * documentation and/or other materials provided with the distribution.
1832 + * * Neither the name of the above-listed copyright holders nor the
1833 + * names of any contributors may be used to endorse or promote products
1834 + * derived from this software without specific prior written permission.
1835 + *
1836 + *
1837 + * ALTERNATIVELY, this software may be distributed under the terms of the
1838 + * GNU General Public License ("GPL") as published by the Free Software
1839 + * Foundation, either version 2 of that License or (at your option) any
1840 + * later version.
1841 + *
1842 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1843 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1844 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1845 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
1846 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1847 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1848 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1849 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1850 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1851 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1852 + * POSSIBILITY OF SUCH DAMAGE.
1853 + */
1854 +#ifndef __FSL_DPDMAI_H
1855 +#define __FSL_DPDMAI_H
1856 +
1857 +struct fsl_mc_io;
1858 +
1859 +/* Data Path DMA Interface API
1860 + * Contains initialization APIs and runtime control APIs for DPDMAI
1861 + */
1862 +
1863 +/* General DPDMAI macros */
1864 +
1865 +/**
1866 + * Maximum number of Tx/Rx priorities per DPDMAI object
1867 + */
1868 +#define DPDMAI_PRIO_NUM 2
1869 +
1870 +/**
1871 + * All queues considered; see dpdmai_set_rx_queue()
1872 + */
1873 +#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
1874 +
1875 +/**
1876 + * dpdmai_open() - Open a control session for the specified object
1877 + * @mc_io: Pointer to MC portal's I/O object
1878 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
1879 + * @dpdmai_id: DPDMAI unique ID
1880 + * @token: Returned token; use in subsequent API calls
1881 + *
1882 + * This function can be used to open a control session for an
1883 + * already created object; an object may have been declared in
1884 + * the DPL or by calling the dpdmai_create() function.
1885 + * This function returns a unique authentication token,
1886 + * associated with the specific object ID and the specific MC
1887 + * portal; this token must be used in all subsequent commands for
1888 + * this specific object.
1889 + *
1890 + * Return: '0' on Success; Error code otherwise.
1891 + */
1892 +int dpdmai_open(struct fsl_mc_io *mc_io,
1893 + uint32_t cmd_flags,
1894 + int dpdmai_id,
1895 + uint16_t *token);
1896 +
1897 +/**
1898 + * dpdmai_close() - Close the control session of the object
1899 + * @mc_io: Pointer to MC portal's I/O object
1900 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
1901 + * @token: Token of DPDMAI object
1902 + *
1903 + * After this function is called, no further operations are
1904 + * allowed on the object without opening a new control session.
1905 + *
1906 + * Return: '0' on Success; Error code otherwise.
1907 + */
1908 +int dpdmai_close(struct fsl_mc_io *mc_io,
1909 + uint32_t cmd_flags,
1910 + uint16_t token);
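
For illustration, a minimal sketch of the open/use/close session model
described above; the helper name is hypothetical, and the 'mc_io' portal
handle is assumed to have been allocated elsewhere (e.g. by the fsl-mc bus
core):

static int example_dpdmai_session(struct fsl_mc_io *mc_io, int dpdmai_id)
{
        uint16_t token;
        int err;

        err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
        if (err)
                return err;

        /* ... issue dpdmai_*() commands against 'token' here ... */

        return dpdmai_close(mc_io, 0, token);
}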
1911 +
1912 +/**
1913 + * struct dpdmai_cfg - Structure representing DPDMAI configuration
1914 + * @priorities: Priorities for the DMA hardware processing; valid priorities are
1915 + * configured with values 1-8; the entry following last valid entry
1916 + * should be configured with 0
1917 + */
1918 +struct dpdmai_cfg {
1919 + uint8_t priorities[DPDMAI_PRIO_NUM];
1920 +};
1921 +
1922 +/**
1923 + * dpdmai_create() - Create the DPDMAI object
1924 + * @mc_io: Pointer to MC portal's I/O object
1925 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
1926 + * @cfg: Configuration structure
1927 + * @token: Returned token; use in subsequent API calls
1928 + *
1929 + * Create the DPDMAI object, allocate required resources and
1930 + * perform required initialization.
1931 + *
1932 + * The object can be created either by declaring it in the
1933 + * DPL file, or by calling this function.
1934 + *
1935 + * This function returns a unique authentication token,
1936 + * associated with the specific object ID and the specific MC
1937 + * portal; this token must be used in all subsequent calls to
1938 + * this specific object. For objects that are created using the
1939 + * DPL file, call dpdmai_open() function to get an authentication
1940 + * token first.
1941 + *
1942 + * Return: '0' on Success; Error code otherwise.
1943 + */
1944 +int dpdmai_create(struct fsl_mc_io *mc_io,
1945 + uint32_t cmd_flags,
1946 + const struct dpdmai_cfg *cfg,
1947 + uint16_t *token);
1948 +
1949 +/**
1950 + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
1951 + * @mc_io: Pointer to MC portal's I/O object
1952 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
1953 + * @token: Token of DPDMAI object
1954 + *
1955 + * Return: '0' on Success; error code otherwise.
1956 + */
1957 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
1958 + uint32_t cmd_flags,
1959 + uint16_t token);
1960 +
1961 +/**
1962 + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
1963 + * @mc_io: Pointer to MC portal's I/O object
1964 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
1965 + * @token: Token of DPDMAI object
1966 + *
1967 + * Return: '0' on Success; Error code otherwise.
1968 + */
1969 +int dpdmai_enable(struct fsl_mc_io *mc_io,
1970 + uint32_t cmd_flags,
1971 + uint16_t token);
1972 +
1973 +/**
1974 + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
1975 + * @mc_io: Pointer to MC portal's I/O object
1976 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
1977 + * @token: Token of DPDMAI object
1978 + *
1979 + * Return: '0' on Success; Error code otherwise.
1980 + */
1981 +int dpdmai_disable(struct fsl_mc_io *mc_io,
1982 + uint32_t cmd_flags,
1983 + uint16_t token);
1984 +
1985 +/**
1986 + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
1987 + * @mc_io: Pointer to MC portal's I/O object
1988 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
1989 + * @token: Token of DPDMAI object
1990 + * @en: Returns '1' if object is enabled; '0' otherwise
1991 + *
1992 + * Return: '0' on Success; Error code otherwise.
1993 + */
1994 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
1995 + uint32_t cmd_flags,
1996 + uint16_t token,
1997 + int *en);
1998 +
1999 +/**
2000 + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
2001 + * @mc_io: Pointer to MC portal's I/O object
2002 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2003 + * @token: Token of DPDMAI object
2004 + *
2005 + * Return: '0' on Success; Error code otherwise.
2006 + */
2007 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2008 + uint32_t cmd_flags,
2009 + uint16_t token);
2010 +
2011 +/**
2012 + * struct dpdmai_irq_cfg - IRQ configuration
2013 + * @addr: Address that must be written to signal a message-based interrupt
2014 + * @val: Value to write into irq_addr address
2015 + * @irq_num: A user defined number associated with this IRQ
2016 + */
2017 +struct dpdmai_irq_cfg {
2018 + uint64_t addr;
2019 + uint32_t val;
2020 + int irq_num;
2021 +};
2022 +
2023 +/**
2024 + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
2025 + * @mc_io: Pointer to MC portal's I/O object
2026 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2027 + * @token: Token of DPDMAI object
2028 + * @irq_index: Identifies the interrupt index to configure
2029 + * @irq_cfg: IRQ configuration
2030 + *
2031 + * Return: '0' on Success; Error code otherwise.
2032 + */
2033 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2034 + uint32_t cmd_flags,
2035 + uint16_t token,
2036 + uint8_t irq_index,
2037 + struct dpdmai_irq_cfg *irq_cfg);
2038 +
2039 +/**
2040 + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
2041 + *
2042 + * @mc_io: Pointer to MC portal's I/O object
2043 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2044 + * @token: Token of DPDMAI object
2045 + * @irq_index: The interrupt index to configure
2046 + * @type: Interrupt type: 0 represents message interrupt
2047 + * type (both irq_addr and irq_val are valid)
2048 + * @irq_cfg: IRQ attributes
2049 + *
2050 + * Return: '0' on Success; Error code otherwise.
2051 + */
2052 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2053 + uint32_t cmd_flags,
2054 + uint16_t token,
2055 + uint8_t irq_index,
2056 + int *type,
2057 + struct dpdmai_irq_cfg *irq_cfg);
2058 +
2059 +/**
2060 + * dpdmai_set_irq_enable() - Set overall interrupt state.
2061 + * @mc_io: Pointer to MC portal's I/O object
2062 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2063 + * @token: Token of DPDMAI object
2064 + * @irq_index: The interrupt index to configure
2065 + * @en: Interrupt state - enable = 1, disable = 0
2066 + *
2067 + * Allows GPP software to control when interrupts are generated.
2068 + * Each interrupt can have up to 32 causes. The enable/disable control
2069 + * affects the overall interrupt state: if the interrupt is disabled, no
2070 + * cause will assert an interrupt.
2071 + *
2072 + * Return: '0' on Success; Error code otherwise.
2073 + */
2074 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2075 + uint32_t cmd_flags,
2076 + uint16_t token,
2077 + uint8_t irq_index,
2078 + uint8_t en);
2079 +
2080 +/**
2081 + * dpdmai_get_irq_enable() - Get overall interrupt state
2082 + * @mc_io: Pointer to MC portal's I/O object
2083 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2084 + * @token: Token of DPDMAI object
2085 + * @irq_index: The interrupt index to configure
2086 + * @en: Returned Interrupt state - enable = 1, disable = 0
2087 + *
2088 + * Return: '0' on Success; Error code otherwise.
2089 + */
2090 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2091 + uint32_t cmd_flags,
2092 + uint16_t token,
2093 + uint8_t irq_index,
2094 + uint8_t *en);
2095 +
2096 +/**
2097 + * dpdmai_set_irq_mask() - Set interrupt mask.
2098 + * @mc_io: Pointer to MC portal's I/O object
2099 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2100 + * @token: Token of DPDMAI object
2101 + * @irq_index: The interrupt index to configure
2102 + * @mask: event mask to trigger interrupt;
2103 + * each bit:
2104 + * 0 = ignore event
2105 + * 1 = consider event for asserting IRQ
2106 + *
2107 + * Every interrupt can have up to 32 causes and the interrupt model supports
2108 + * masking/unmasking each cause independently.
2109 + *
2110 + * Return: '0' on Success; Error code otherwise.
2111 + */
2112 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2113 + uint32_t cmd_flags,
2114 + uint16_t token,
2115 + uint8_t irq_index,
2116 + uint32_t mask);
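
A hedged sketch combining the two calls above: unmask a couple of
illustrative cause bits, then enable the interrupt index as a whole (the
helper name and cause values are hypothetical, not part of this API):

static int example_arm_irq(struct fsl_mc_io *mc_io, uint16_t token,
                           uint8_t irq_index)
{
        int err;

        /* unmask illustrative cause bits 0 and 1 */
        err = dpdmai_set_irq_mask(mc_io, 0, token, irq_index, 0x3);
        if (err)
                return err;

        /* then enable the interrupt index as a whole */
        return dpdmai_set_irq_enable(mc_io, 0, token, irq_index, 1);
}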
2117 +
2118 +/**
2119 + * dpdmai_get_irq_mask() - Get interrupt mask.
2120 + * @mc_io: Pointer to MC portal's I/O object
2121 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2122 + * @token: Token of DPDMAI object
2123 + * @irq_index: The interrupt index to configure
2124 + * @mask: Returned event mask to trigger interrupt
2125 + *
2126 + * Every interrupt can have up to 32 causes and the interrupt model supports
2127 + * masking/unmasking each cause independently.
2128 + *
2129 + * Return: '0' on Success; Error code otherwise.
2130 + */
2131 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2132 + uint32_t cmd_flags,
2133 + uint16_t token,
2134 + uint8_t irq_index,
2135 + uint32_t *mask);
2136 +
2137 +/**
2138 + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
2139 + * @mc_io: Pointer to MC portal's I/O object
2140 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2141 + * @token: Token of DPDMAI object
2142 + * @irq_index: The interrupt index to configure
2143 + * @status: Returned interrupts status - one bit per cause:
2144 + * 0 = no interrupt pending
2145 + * 1 = interrupt pending
2146 + *
2147 + * Return: '0' on Success; Error code otherwise.
2148 + */
2149 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2150 + uint32_t cmd_flags,
2151 + uint16_t token,
2152 + uint8_t irq_index,
2153 + uint32_t *status);
2154 +
2155 +/**
2156 + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
2157 + * @mc_io: Pointer to MC portal's I/O object
2158 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2159 + * @token: Token of DPDMAI object
2160 + * @irq_index: The interrupt index to configure
2161 + * @status: bits to clear (W1C) - one bit per cause:
2162 + * 0 = don't change
2163 + * 1 = clear status bit
2164 + *
2165 + * Return: '0' on Success; Error code otherwise.
2166 + */
2167 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2168 + uint32_t cmd_flags,
2169 + uint16_t token,
2170 + uint8_t irq_index,
2171 + uint32_t status);
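
The get/clear pair is typically used in a read-then-write-1-to-clear
sequence; a minimal sketch (hypothetical helper, dispatch abbreviated):

static void example_service_irq(struct fsl_mc_io *mc_io, uint16_t token,
                                uint8_t irq_index)
{
        uint32_t status = 0;

        if (dpdmai_get_irq_status(mc_io, 0, token, irq_index, &status))
                return;

        /* ... dispatch on the pending cause bits in 'status' ... */

        /* write-1-to-clear exactly the causes that were seen */
        dpdmai_clear_irq_status(mc_io, 0, token, irq_index, status);
}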
2172 +
2173 +/**
2174 + * struct dpdmai_attr - Structure representing DPDMAI attributes
2175 + * @id: DPDMAI object ID
2176 + * @version: DPDMAI version
2177 + * @num_of_priorities: number of priorities
2178 + */
2179 +struct dpdmai_attr {
2180 + int id;
2181 + /**
2182 + * struct version - DPDMAI version
2183 + * @major: DPDMAI major version
2184 + * @minor: DPDMAI minor version
2185 + */
2186 + struct {
2187 + uint16_t major;
2188 + uint16_t minor;
2189 + } version;
2190 + uint8_t num_of_priorities;
2191 +};
2192 +
2193 +/**
2194 + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
2195 + * @mc_io: Pointer to MC portal's I/O object
2196 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2197 + * @token: Token of DPDMAI object
2198 + * @attr: Returned object's attributes
2199 + *
2200 + * Return: '0' on Success; Error code otherwise.
2201 + */
2202 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2203 + uint32_t cmd_flags,
2204 + uint16_t token,
2205 + struct dpdmai_attr *attr);
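
A small sketch of reading the attributes back; the helper name and log
format are illustrative only:

static int example_report_attr(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpdmai_attr attr;
        int err;

        err = dpdmai_get_attributes(mc_io, 0, token, &attr);
        if (err)
                return err;

        pr_info("dpdmai.%d: v%u.%u, %u priorities\n", attr.id,
                attr.version.major, attr.version.minor,
                attr.num_of_priorities);
        return 0;
}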
2206 +
2207 +/**
2208 + * enum dpdmai_dest - DPDMAI destination types
2209 + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
2210 + * and does not generate FQDAN notifications; user is expected to dequeue
2211 + * from the queue based on polling or other user-defined method
2212 + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
2213 + * notifications to the specified DPIO; user is expected to dequeue
2214 + * from the queue only after notification is received
2215 + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
2216 + * FQDAN notifications, but is connected to the specified DPCON object;
2217 + * user is expected to dequeue from the DPCON channel
2218 + */
2219 +enum dpdmai_dest {
2220 + DPDMAI_DEST_NONE = 0,
2221 + DPDMAI_DEST_DPIO = 1,
2222 + DPDMAI_DEST_DPCON = 2
2223 +};
2224 +
2225 +/**
2226 + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
2227 + * @dest_type: Destination type
2228 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
2229 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
2230 + * are 0-1 or 0-7, depending on the number of priorities in that
2231 + * channel; not relevant for 'DPDMAI_DEST_NONE' option
2232 + */
2233 +struct dpdmai_dest_cfg {
2234 + enum dpdmai_dest dest_type;
2235 + int dest_id;
2236 + uint8_t priority;
2237 +};
2238 +
2239 +/* DPDMAI queue modification options */
2240 +
2241 +/**
2242 + * Select to modify the user's context associated with the queue
2243 + */
2244 +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
2245 +
2246 +/**
2247 + * Select to modify the queue's destination
2248 + */
2249 +#define DPDMAI_QUEUE_OPT_DEST 0x00000002
2250 +
2251 +/**
2252 + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
2253 + * @options: Flags representing the suggested modifications to the queue;
2254 + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
2255 + * @user_ctx: User context value provided in the frame descriptor of each
2256 + * dequeued frame;
2257 + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
2258 + * @dest_cfg: Queue destination parameters;
2259 + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
2260 + */
2261 +struct dpdmai_rx_queue_cfg {
2262 + uint32_t options;
2263 + uint64_t user_ctx;
2264 + struct dpdmai_dest_cfg dest_cfg;
2266 +};
2267 +
2268 +/**
2269 + * dpdmai_set_rx_queue() - Set Rx queue configuration
2270 + * @mc_io: Pointer to MC portal's I/O object
2271 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2272 + * @token: Token of DPDMAI object
2273 + * @priority: Select the queue relative to number of
2274 + * priorities configured at DPDMAI creation; use
2275 + * DPDMAI_ALL_QUEUES to configure all Rx queues
2276 + * identically.
2277 + * @cfg: Rx queue configuration
2278 + *
2279 + * Return: '0' on Success; Error code otherwise.
2280 + */
2281 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2282 + uint32_t cmd_flags,
2283 + uint16_t token,
2284 + uint8_t priority,
2285 + const struct dpdmai_rx_queue_cfg *cfg);
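
Putting the queue-option flags, the destination structure and
DPDMAI_ALL_QUEUES together, a hypothetical configuration sketch (the
helper, 'dpio_id' and 'ctx' are caller-supplied assumptions):

static int example_route_rx_to_dpio(struct fsl_mc_io *mc_io, uint16_t token,
                                    int dpio_id, uint64_t ctx)
{
        struct dpdmai_rx_queue_cfg cfg = {
                .options  = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
                .user_ctx = ctx,
                .dest_cfg = {
                        .dest_type = DPDMAI_DEST_DPIO,
                        .dest_id   = dpio_id,
                        .priority  = 0,
                },
        };

        return dpdmai_set_rx_queue(mc_io, 0, token, DPDMAI_ALL_QUEUES, &cfg);
}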
2286 +
2287 +/**
2288 + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
2289 + * @user_ctx: User context value provided in the frame descriptor of each
2290 + * dequeued frame
2291 + * @dest_cfg: Queue destination configuration
2292 + * @fqid: Virtual FQID value to be used for dequeue operations
2293 + */
2294 +struct dpdmai_rx_queue_attr {
2295 + uint64_t user_ctx;
2296 + struct dpdmai_dest_cfg dest_cfg;
2297 + uint32_t fqid;
2298 +};
2299 +
2300 +/**
2301 + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
2302 + * @mc_io: Pointer to MC portal's I/O object
2303 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2304 + * @token: Token of DPDMAI object
2305 + * @priority: Select the queue relative to number of
2306 + * priorities configured at DPDMAI creation
2307 + * @attr: Returned Rx queue attributes
2308 + *
2309 + * Return: '0' on Success; Error code otherwise.
2310 + */
2311 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2312 + uint32_t cmd_flags,
2313 + uint16_t token,
2314 + uint8_t priority,
2315 + struct dpdmai_rx_queue_attr *attr);
2316 +
2317 +/**
2318 + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
2319 + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
2320 + */
2322 +struct dpdmai_tx_queue_attr {
2323 + uint32_t fqid;
2324 +};
2325 +
2326 +/**
2327 + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
2328 + * @mc_io: Pointer to MC portal's I/O object
2329 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2330 + * @token: Token of DPDMAI object
2331 + * @priority: Select the queue relative to number of
2332 + * priorities configured at DPDMAI creation
2333 + * @attr: Returned Tx queue attributes
2334 + *
2335 + * Return: '0' on Success; Error code otherwise.
2336 + */
2337 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2338 + uint32_t cmd_flags,
2339 + uint16_t token,
2340 + uint8_t priority,
2341 + struct dpdmai_tx_queue_attr *attr);
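
A sketch of retrieving the virtual FQIDs for one priority; these are the
values a caller would feed to the DPIO enqueue/dequeue path (the helper
name is hypothetical):

static int example_get_fqids(struct fsl_mc_io *mc_io, uint16_t token,
                             uint32_t *tx_fqid, uint32_t *rx_fqid)
{
        struct dpdmai_tx_queue_attr tx_attr;
        struct dpdmai_rx_queue_attr rx_attr;
        int err;

        err = dpdmai_get_tx_queue(mc_io, 0, token, 0, &tx_attr);
        if (err)
                return err;

        err = dpdmai_get_rx_queue(mc_io, 0, token, 0, &rx_attr);
        if (err)
                return err;

        *tx_fqid = tx_attr.fqid;
        *rx_fqid = rx_attr.fqid;
        return 0;
}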
2342 +
2343 +#endif /* __FSL_DPDMAI_H */
2344 diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2345 new file mode 100644
2346 index 00000000..7d403c01
2347 --- /dev/null
2348 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2349 @@ -0,0 +1,222 @@
2350 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
2351 + *
2352 + * Redistribution and use in source and binary forms, with or without
2353 + * modification, are permitted provided that the following conditions are met:
2354 + * * Redistributions of source code must retain the above copyright
2355 + * notice, this list of conditions and the following disclaimer.
2356 + * * Redistributions in binary form must reproduce the above copyright
2357 + * notice, this list of conditions and the following disclaimer in the
2358 + * documentation and/or other materials provided with the distribution.
2359 + * * Neither the name of the above-listed copyright holders nor the
2360 + * names of any contributors may be used to endorse or promote products
2361 + * derived from this software without specific prior written permission.
2362 + *
2363 + *
2364 + * ALTERNATIVELY, this software may be distributed under the terms of the
2365 + * GNU General Public License ("GPL") as published by the Free Software
2366 + * Foundation, either version 2 of that License or (at your option) any
2367 + * later version.
2368 + *
2369 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2370 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2371 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2372 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2373 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2374 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2375 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2376 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2377 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2378 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2379 + * POSSIBILITY OF SUCH DAMAGE.
2380 + */
2381 +#ifndef _FSL_DPDMAI_CMD_H
2382 +#define _FSL_DPDMAI_CMD_H
2383 +
2384 +/* DPDMAI Version */
2385 +#define DPDMAI_VER_MAJOR 2
2386 +#define DPDMAI_VER_MINOR 2
2387 +
2388 +#define DPDMAI_CMD_BASE_VERSION 0
2389 +#define DPDMAI_CMD_ID_OFFSET 4
2390 +
2391 +/* Command IDs */
2392 +#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2393 +#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2394 +#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2395 +#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2396 +
2397 +#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2398 +#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2399 +#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2400 +#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2401 +#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2402 +
2403 +#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2404 +#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2405 +#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2406 +#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2407 +#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2408 +#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2409 +#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2410 +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2411 +
2412 +#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2413 +#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2414 +#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2415 +
2416 +
2417 +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
2418 +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
2419 +
2420 +
2421 +#define MAKE_UMASK64(_width) \
2422 + ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
2423 + (uint64_t)-1))
2424 +
2425 +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
2426 +{
2427 + return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
2428 +}
2429 +
2430 +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
2431 +{
2432 + return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
2433 +}
2434 +
2435 +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
2436 + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
2437 +
2438 +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
2439 + (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
2440 +
2441 +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
2442 + ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
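
A worked example of this packing scheme, with illustrative values: the
DPDMAI_CMD_GET_RX_QUEUE macro below stores the 8-bit priority at bit
offset 40 of params[0], so

        MC_CMD_OP(cmd, 0, 40, 8, uint8_t, 0x03)

expands to cmd.params[0] |= mc_enc(40, 8, 0x03), i.e. it ORs in
(0x03 & 0xff) << 40 == 0x0000030000000000, and MC_RSP_OP reverses it with
mc_dec(val, 40, 8).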
2443 +
2444 +/* cmd, param, offset, width, type, arg_name */
2445 +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
2446 + MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
2447 +
2448 +/* cmd, param, offset, width, type, arg_name */
2449 +#define DPDMAI_CMD_CREATE(cmd, cfg) \
2450 +do { \
2451 + MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
2452 + MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
2453 +} while (0)
2454 +
2455 +/* cmd, param, offset, width, type, arg_name */
2456 +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
2457 + MC_RSP_OP(cmd, 0, 0, 1, int, en)
2458 +
2459 +/* cmd, param, offset, width, type, arg_name */
2460 +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
2461 +do { \
2462 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
2463 + MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
2464 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
2465 + MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
2466 +} while (0)
2467 +
2468 +/* cmd, param, offset, width, type, arg_name */
2469 +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
2470 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
2471 +
2472 +/* cmd, param, offset, width, type, arg_name */
2473 +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
2474 +do { \
2475 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
2476 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
2477 + MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
2478 + MC_RSP_OP(cmd, 2, 32, 32, int, type); \
2479 +} while (0)
2480 +
2481 +/* cmd, param, offset, width, type, arg_name */
2482 +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
2483 +do { \
2484 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
2485 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
2486 +} while (0)
2487 +
2488 +/* cmd, param, offset, width, type, arg_name */
2489 +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
2490 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
2491 +
2492 +/* cmd, param, offset, width, type, arg_name */
2493 +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
2494 + MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
2495 +
2496 +/* cmd, param, offset, width, type, arg_name */
2497 +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
2498 +do { \
2499 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
2500 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
2501 +} while (0)
2502 +
2503 +/* cmd, param, offset, width, type, arg_name */
2504 +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
2505 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
2506 +
2507 +/* cmd, param, offset, width, type, arg_name */
2508 +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
2509 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
2510 +
2511 +/* cmd, param, offset, width, type, arg_name */
2512 +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
2513 +do { \
2514 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
2515 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
2516 +} while (0)
2517 +
2518 +/* cmd, param, offset, width, type, arg_name */
2519 +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
2520 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
2521 +
2522 +/* cmd, param, offset, width, type, arg_name */
2523 +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
2524 +do { \
2525 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
2526 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
2527 +} while (0)
2528 +
2529 +/* cmd, param, offset, width, type, arg_name */
2530 +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
2531 +do { \
2532 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
2533 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
2534 + MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
2535 + MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
2536 +} while (0)
2537 +
2538 +/* cmd, param, offset, width, type, arg_name */
2539 +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
2540 +do { \
2541 + MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
2542 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
2543 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
2544 + MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
2545 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
2546 + MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
2547 +} while (0)
2548 +
2549 +/* cmd, param, offset, width, type, arg_name */
2550 +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
2551 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
2552 +
2553 +/* cmd, param, offset, width, type, arg_name */
2554 +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
2555 +do { \
2556 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
2557 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
2558 + MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
2559 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
2560 + MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
2561 +} while (0)
2562 +
2563 +/* cmd, param, offset, width, type, arg_name */
2564 +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
2565 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
2566 +
2567 +/* cmd, param, offset, width, type, arg_name */
2568 +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
2569 + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
2570 +
2571 +#endif /* _FSL_DPDMAI_CMD_H */
2572 diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
2573 new file mode 100644
2574 index 00000000..6c4c2813
2575 --- /dev/null
2576 +++ b/drivers/dma/fsl-qdma.c
2577 @@ -0,0 +1,1201 @@
2578 +/*
2579 + * drivers/dma/fsl-qdma.c
2580 + *
2581 + * Copyright 2014-2015 Freescale Semiconductor, Inc.
2582 + *
2583 + * Driver for the Freescale qDMA engine with software command queue mode.
2584 + * Channel virtualization is supported through enqueuing of DMA jobs to,
2585 + * or dequeuing DMA jobs from, different work queues.
2586 + * This module can be found on Freescale LS SoCs.
2587 + *
2588 + * This program is free software; you can redistribute it and/or modify it
2589 + * under the terms of the GNU General Public License as published by the
2590 + * Free Software Foundation; either version 2 of the License, or (at your
2591 + * option) any later version.
2592 + */
2593 +
2594 +#include <asm/cacheflush.h>
2595 +#include <linux/clk.h>
2596 +#include <linux/delay.h>
2597 +#include <linux/dma-mapping.h>
2598 +#include <linux/dmapool.h>
2599 +#include <linux/init.h>
2600 +#include <linux/interrupt.h>
2601 +#include <linux/module.h>
2602 +#include <linux/of.h>
2603 +#include <linux/of_address.h>
2604 +#include <linux/of_device.h>
2605 +#include <linux/of_dma.h>
2606 +#include <linux/of_irq.h>
2607 +#include <linux/slab.h>
2608 +#include <linux/spinlock.h>
2609 +
2610 +#include "virt-dma.h"
2611 +
2612 +#define FSL_QDMA_DMR 0x0
2613 +#define FSL_QDMA_DSR 0x4
2614 +#define FSL_QDMA_DEIER 0xe00
2615 +#define FSL_QDMA_DEDR 0xe04
2616 +#define FSL_QDMA_DECFDW0R 0xe10
2617 +#define FSL_QDMA_DECFDW1R 0xe14
2618 +#define FSL_QDMA_DECFDW2R 0xe18
2619 +#define FSL_QDMA_DECFDW3R 0xe1c
2620 +#define FSL_QDMA_DECFQIDR 0xe30
2621 +#define FSL_QDMA_DECBR 0xe34
2622 +
2623 +#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
2624 +#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
2625 +#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
2626 +#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
2627 +#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
2628 +#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
2629 +#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
2630 +#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
2631 +
2632 +#define FSL_QDMA_SQDPAR 0x80c
2633 +#define FSL_QDMA_SQEPAR 0x814
2634 +#define FSL_QDMA_BSQMR 0x800
2635 +#define FSL_QDMA_BSQSR 0x804
2636 +#define FSL_QDMA_BSQICR 0x828
2637 +#define FSL_QDMA_CQMR 0xa00
2638 +#define FSL_QDMA_CQDSCR1 0xa08
2639 +#define FSL_QDMA_CQDSCR2 0xa0c
2640 +#define FSL_QDMA_CQIER 0xa10
2641 +#define FSL_QDMA_CQEDR 0xa14
2642 +#define FSL_QDMA_SQCCMR 0xa20
2643 +
2644 +#define FSL_QDMA_SQICR_ICEN
2645 +
2646 +#define FSL_QDMA_CQIDR_CQT 0xff000000
2647 +#define FSL_QDMA_CQIDR_SQPE 0x800000
2648 +#define FSL_QDMA_CQIDR_SQT 0x8000
2649 +
2650 +#define FSL_QDMA_BCQIER_CQTIE 0x8000
2651 +#define FSL_QDMA_BCQIER_CQPEIE 0x800000
2652 +#define FSL_QDMA_BSQICR_ICEN 0x80000000
2653 +#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
2654 +#define FSL_QDMA_CQIER_MEIE 0x80000000
2655 +#define FSL_QDMA_CQIER_TEIE 0x1
2656 +#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
2657 +
2658 +#define FSL_QDMA_QUEUE_MAX 8
2659 +
2660 +#define FSL_QDMA_BCQMR_EN 0x80000000
2661 +#define FSL_QDMA_BCQMR_EI 0x40000000
2662 +#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
2663 +#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
2664 +
2665 +#define FSL_QDMA_BCQSR_QF 0x10000
2666 +#define FSL_QDMA_BCQSR_XOFF 0x1
2667 +
2668 +#define FSL_QDMA_BSQMR_EN 0x80000000
2669 +#define FSL_QDMA_BSQMR_DI 0x40000000
2670 +#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
2671 +
2672 +#define FSL_QDMA_BSQSR_QE 0x20000
2673 +
2674 +#define FSL_QDMA_DMR_DQD 0x40000000
2675 +#define FSL_QDMA_DSR_DB 0x80000000
2676 +
2677 +#define FSL_QDMA_BASE_BUFFER_SIZE 96
2678 +#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16
2679 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
2680 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
2681 +#define FSL_QDMA_QUEUE_NUM_MAX 8
2682 +
2683 +#define FSL_QDMA_CMD_RWTTYPE 0x4
2684 +#define FSL_QDMA_CMD_LWC 0x2
2685 +
2686 +#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
2687 +#define FSL_QDMA_CMD_NS_OFFSET 27
2688 +#define FSL_QDMA_CMD_DQOS_OFFSET 24
2689 +#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
2690 +#define FSL_QDMA_CMD_DSEN_OFFSET 19
2691 +#define FSL_QDMA_CMD_LWC_OFFSET 16
2692 +
2693 +#define FSL_QDMA_E_SG_TABLE 1
2694 +#define FSL_QDMA_E_DATA_BUFFER 0
2695 +#define FSL_QDMA_F_LAST_ENTRY 1
2696 +
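/*
 * Most recently seen status-queue entry (address and queue number);
 * fsl_qdma_queue_transfer_complete() below uses these to recognize
 * duplicate completion notifications from the hardware.
 */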
2697 +static u64 pre_addr, pre_queue;
2698 +
2699 +struct fsl_qdma_ccdf {
2700 + u8 status;
2701 + u32 rev1:22;
2702 + u32 ser:1;
2703 + u32 rev2:1;
2704 + u32 rev3:20;
2705 + u32 offset:9;
2706 + u32 format:3;
2707 + union {
2708 + struct {
2709 + u32 addr_lo; /* low 32-bits of 40-bit address */
2710 + u32 addr_hi:8; /* high 8-bits of 40-bit address */
2711 + u32 rev4:16;
2712 + u32 queue:3;
2713 + u32 rev5:3;
2714 + u32 dd:2; /* dynamic debug */
2715 + };
2716 + struct {
2717 + u64 addr:40;
2718 + /* More efficient address accessor */
2719 + u64 __notaddress:24;
2720 + };
2721 + };
2722 +} __packed;
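
The two anonymous structs in the union alias the same 40-bit bus address;
a sketch of what the packed accessor saves the driver from doing by hand
(this helper is illustrative, not part of the patch):

static inline void example_set_ccdf_addr(struct fsl_qdma_ccdf *ccdf,
                                         u64 bus_addr)
{
        /* packed 40-bit view; the bitfield truncates to 40 bits */
        ccdf->addr = bus_addr;
        /*
         * Equivalent through the split view:
         *   ccdf->addr_lo = lower_32_bits(bus_addr);
         *   ccdf->addr_hi = upper_32_bits(bus_addr) & 0xff;
         */
}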
2723 +
2724 +struct fsl_qdma_csgf {
2725 + u32 offset:13;
2726 + u32 rev1:19;
2727 + u32 length:30;
2728 + u32 f:1;
2729 + u32 e:1;
2730 + union {
2731 + struct {
2732 + u32 addr_lo; /* low 32-bits of 40-bit address */
2733 + u32 addr_hi:8; /* high 8-bits of 40-bit address */
2734 + u32 rev2:24;
2735 + };
2736 + struct {
2737 + u64 addr:40;
2738 + /* More efficient address accessor */
2739 + u64 __notaddress:24;
2740 + };
2741 + };
2742 +} __packed;
2743 +
2744 +struct fsl_qdma_sdf {
2745 + u32 rev3:32;
2746 + u32 ssd:12; /* source stride distance */
2747 + u32 sss:12; /* source stride size */
2748 + u32 rev4:8;
2749 + u32 rev5:32;
2750 + u32 cmd;
2751 +} __packed;
2752 +
2753 +struct fsl_qdma_ddf {
2754 + u32 rev1:32;
2755 + u32 dsd:12; /* Destination stride distance */
2756 + u32 dss:12; /* Destination stride size */
2757 + u32 rev2:8;
2758 + u32 rev3:32;
2759 + u32 cmd;
2760 +} __packed;
2761 +
2762 +struct fsl_qdma_chan {
2763 + struct virt_dma_chan vchan;
2764 + struct virt_dma_desc vdesc;
2765 + enum dma_status status;
2766 + u32 slave_id;
2767 + struct fsl_qdma_engine *qdma;
2768 + struct fsl_qdma_queue *queue;
2769 + struct list_head qcomp;
2770 +};
2771 +
2772 +struct fsl_qdma_queue {
2773 + struct fsl_qdma_ccdf *virt_head;
2774 + struct fsl_qdma_ccdf *virt_tail;
2775 + struct list_head comp_used;
2776 + struct list_head comp_free;
2777 + struct dma_pool *comp_pool;
2778 + struct dma_pool *sg_pool;
2779 + spinlock_t queue_lock;
2780 + dma_addr_t bus_addr;
2781 + u32 n_cq;
2782 + u32 id;
2783 + struct fsl_qdma_ccdf *cq;
2784 +};
2785 +
2786 +struct fsl_qdma_sg {
2787 + dma_addr_t bus_addr;
2788 + void *virt_addr;
2789 +};
2790 +
2791 +struct fsl_qdma_comp {
2792 + dma_addr_t bus_addr;
2793 + void *virt_addr;
2794 + struct fsl_qdma_chan *qchan;
2795 + struct fsl_qdma_sg *sg_block;
2796 + struct virt_dma_desc vdesc;
2797 + struct list_head list;
2798 + u32 sg_block_src;
2799 + u32 sg_block_dst;
2800 +};
2801 +
2802 +struct fsl_qdma_engine {
2803 + struct dma_device dma_dev;
2804 + void __iomem *ctrl_base;
2805 + void __iomem *status_base;
2806 + void __iomem *block_base;
2807 + u32 n_chans;
2808 + u32 n_queues;
2809 + struct mutex fsl_qdma_mutex;
2810 + int error_irq;
2811 + int queue_irq;
2812 + bool big_endian;
2813 + struct fsl_qdma_queue *queue;
2814 + struct fsl_qdma_queue *status;
2815 + struct fsl_qdma_chan chans[];
2817 +};
2818 +
2819 +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
2820 +{
2821 + if (qdma->big_endian)
2822 + return ioread32be(addr);
2823 + else
2824 + return ioread32(addr);
2825 +}
2826 +
2827 +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
2828 + void __iomem *addr)
2829 +{
2830 + if (qdma->big_endian)
2831 + iowrite32be(val, addr);
2832 + else
2833 + iowrite32(val, addr);
2834 +}
2835 +
2836 +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
2837 +{
2838 + return container_of(chan, struct fsl_qdma_chan, vchan.chan);
2839 +}
2840 +
2841 +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
2842 +{
2843 + return container_of(vd, struct fsl_qdma_comp, vdesc);
2844 +}
2845 +
2846 +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
2847 +{
2848 + /*
2849 + * In qDMA mode, we don't need to do anything.
2850 + */
2851 + return 0;
2852 +}
2853 +
2854 +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
2855 +{
2856 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
2857 + unsigned long flags;
2858 + LIST_HEAD(head);
2859 +
2860 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
2861 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
2862 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
2863 +
2864 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
2865 +}
2866 +
2867 +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
2868 + dma_addr_t dst, dma_addr_t src, u32 len)
2869 +{
2870 + struct fsl_qdma_ccdf *ccdf;
2871 + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
2872 + struct fsl_qdma_sdf *sdf;
2873 + struct fsl_qdma_ddf *ddf;
2874 +
2875 + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
2876 + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
2877 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
2878 + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
2879 + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
2880 + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
2881 +
2882 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
2883 + /* Head Command Descriptor (Frame Descriptor) */
2884 + ccdf->addr = fsl_comp->bus_addr + 16;
2885 + ccdf->format = 1; /* Compound S/G format */
2886 + /* Status notification is enqueued to status queue. */
2887 + ccdf->ser = 1;
2888 + /* Compound Command Descriptor (Frame List Table) */
2889 + csgf_desc->addr = fsl_comp->bus_addr + 64;
2890 + /* It must be 32 as Compound S/G Descriptor */
2891 + csgf_desc->length = 32;
2892 + csgf_src->addr = src;
2893 + csgf_src->length = len;
2894 + csgf_dest->addr = dst;
2895 + csgf_dest->length = len;
2896 + /* This entry is the last entry. */
2897 + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
2898 + /* Descriptor Buffer */
2899 + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
2900 + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
2901 + ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET;
2902 +}
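
The fixed offsets above (+16, +64 and the pointer increments) follow from
the layout of the 96-byte (FSL_QDMA_BASE_BUFFER_SIZE) completion buffer,
six 16-byte entries:

        +0      ccdf            frame descriptor; addr points to +16
        +16     csgf_desc       frame list entry; addr points to +64, length 32
        +32     csgf_src        source buffer entry
        +48     csgf_dest       destination buffer entry (F bit marks the last)
        +64     sdf             source descriptor command
        +80     ddf             destination descriptor command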
2903 +
2904 +static void fsl_qdma_comp_fill_sg(
2905 + struct fsl_qdma_comp *fsl_comp,
2906 + struct scatterlist *dst_sg, unsigned int dst_nents,
2907 + struct scatterlist *src_sg, unsigned int src_nents)
2908 +{
2909 + struct fsl_qdma_ccdf *ccdf;
2910 + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg;
2911 + struct fsl_qdma_sdf *sdf;
2912 + struct fsl_qdma_ddf *ddf;
2913 + struct fsl_qdma_sg *sg_block, *temp;
2914 + struct scatterlist *sg;
2915 + u64 total_src_len = 0;
2916 + u64 total_dst_len = 0;
2917 + u32 i;
2918 +
2919 + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
2920 + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
2921 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
2922 + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
2923 + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
2924 + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
2925 +
2926 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
2927 + /* Head Command Descriptor (Frame Descriptor) */
2928 + ccdf->addr = fsl_comp->bus_addr + 16;
2929 + ccdf->format = 1; /* Compound S/G format */
2930 + /* Status notification is enqueued to status queue. */
2931 + ccdf->ser = 1;
2932 +
2933 + /* Compound Command Descriptor (Frame List Table) */
2934 + csgf_desc->addr = fsl_comp->bus_addr + 64;
2935 + /* It must be 32 as Compound S/G Descriptor */
2936 + csgf_desc->length = 32;
2937 +
2938 + sg_block = fsl_comp->sg_block;
2939 + csgf_src->addr = sg_block->bus_addr;
2940 + /* This entry links to the s/g entry. */
2941 + csgf_src->e = FSL_QDMA_E_SG_TABLE;
2942 +
2943 + temp = sg_block + fsl_comp->sg_block_src;
2944 + csgf_dest->addr = temp->bus_addr;
2945 + /* This entry is the last entry. */
2946 + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
2947 + /* This entry links to the s/g entry. */
2948 + csgf_dest->e = FSL_QDMA_E_SG_TABLE;
2949 +
2950 + for_each_sg(src_sg, sg, src_nents, i) {
2951 + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
2952 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
2953 + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
2954 + csgf_sg->addr = sg_dma_address(sg);
2955 + csgf_sg->length = sg_dma_len(sg);
2956 + total_src_len += sg_dma_len(sg);
2957 +
2958 + if (i == src_nents - 1)
2959 + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
2960 + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
2961 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
2962 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
2963 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
2964 + temp = sg_block +
2965 + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
2966 + csgf_sg->addr = temp->bus_addr;
2967 + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
2968 + }
2969 + }
2970 +
2971 + sg_block += fsl_comp->sg_block_src;
2972 + for_each_sg(dst_sg, sg, dst_nents, i) {
2973 + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
2974 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
2975 + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
2976 + csgf_sg->addr = sg_dma_address(sg);
2977 + csgf_sg->length = sg_dma_len(sg);
2978 + total_dst_len += sg_dma_len(sg);
2979 +
2980 + if (i == dst_nents - 1)
2981 + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
2982 + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
2983 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
2984 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
2985 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
2986 + temp = sg_block +
2987 + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
2988 + csgf_sg->addr = temp->bus_addr;
2989 + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
2990 + }
2991 + }
2992 +
2993 + if (total_src_len != total_dst_len)
2994 + dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
2995 + "The data length for src and dst isn't match.\n");
2996 +
2997 + csgf_src->length = total_src_len;
2998 + csgf_dest->length = total_dst_len;
2999 +
3000 + /* Descriptor Buffer */
3001 + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3002 + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3003 +}
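
The FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1 arithmetic above reflects the
chaining scheme: each 16-entry scatter/gather block carries 15 data
entries, and the final (16th) slot of a full block is written as an
extension entry (E bit set) whose address points at the next block, so
entry i of the logical list lands in block i / 15 at index i % 15.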
3004 +
3005 +/*
3006 + * Pre-request full command descriptors for enqueue.
3007 + */
3008 +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
3009 +{
3010 + struct fsl_qdma_comp *comp_temp;
3011 + int i;
3012 +
3013 + for (i = 0; i < queue->n_cq; i++) {
3014 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3015 + if (!comp_temp)
3016 + return -ENOMEM;
3017 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3018 + GFP_NOWAIT,
3019 + &comp_temp->bus_addr);
3020 + if (!comp_temp->virt_addr) {
+ kfree(comp_temp);
3021 + return -ENOMEM;
+ }
3022 + list_add_tail(&comp_temp->list, &queue->comp_free);
3023 + }
3024 + return 0;
3025 +}
3026 +
3027 +/*
3028 + * Request a command descriptor for enqueue.
3029 + */
3030 +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
3031 + struct fsl_qdma_chan *fsl_chan,
3032 + unsigned int dst_nents,
3033 + unsigned int src_nents)
3034 +{
3035 + struct fsl_qdma_comp *comp_temp;
3036 + struct fsl_qdma_sg *sg_block;
3037 + struct fsl_qdma_queue *queue = fsl_chan->queue;
3038 + unsigned long flags;
3039 + unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
3040 +
3041 + spin_lock_irqsave(&queue->queue_lock, flags);
3042 + if (list_empty(&queue->comp_free)) {
3043 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3044 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3045 + if (!comp_temp)
3046 + return NULL;
3047 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3048 + GFP_NOWAIT,
3049 + &comp_temp->bus_addr);
3050 + if (!comp_temp->virt_addr) {
+ kfree(comp_temp);
3051 + return NULL;
+ }
3052 + } else {
3053 + comp_temp = list_first_entry(&queue->comp_free,
3054 + struct fsl_qdma_comp,
3055 + list);
3056 + list_del(&comp_temp->list);
3057 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3058 + }
3059 +
3060 + if (dst_nents != 0)
3061 + dst_sg_entry_block = dst_nents /
3062 + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3063 + else
3064 + dst_sg_entry_block = 0;
3065 +
3066 + if (src_nents != 0)
3067 + src_sg_entry_block = src_nents /
3068 + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3069 + else
3070 + src_sg_entry_block = 0;
3071 +
3072 + sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
3073 + if (sg_entry_total) {
3074 + sg_block = kzalloc(sizeof(*sg_block) *
3075 + sg_entry_total,
3076 + GFP_KERNEL);
3077 + if (!sg_block)
3078 + return NULL;
3079 + comp_temp->sg_block = sg_block;
3080 + for (i = 0; i < sg_entry_total; i++) {
3081 + sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
3082 + GFP_NOWAIT,
3083 + &sg_block->bus_addr);
+ if (!sg_block->virt_addr)
+ return NULL;
3084 + memset(sg_block->virt_addr, 0,
3085 + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
3086 + sg_block++;
3087 + }
3088 + }
3089 +
3090 + comp_temp->sg_block_src = src_sg_entry_block;
3091 + comp_temp->sg_block_dst = dst_sg_entry_block;
3092 + comp_temp->qchan = fsl_chan;
3093 +
3094 + return comp_temp;
3095 +}
3096 +
3097 +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
3098 + struct platform_device *pdev,
3099 + unsigned int queue_num)
3100 +{
3101 + struct device_node *np = pdev->dev.of_node;
3102 + struct fsl_qdma_queue *queue_head, *queue_temp;
3103 + int ret, len, i;
3104 + unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
3105 +
3106 + if (queue_num > FSL_QDMA_QUEUE_MAX)
3107 + queue_num = FSL_QDMA_QUEUE_MAX;
3108 + len = sizeof(*queue_head) * queue_num;
3109 + queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
3110 + if (!queue_head)
3111 + return NULL;
3112 +
3113 + ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
3114 + queue_num);
3115 + if (ret) {
3116 + dev_err(&pdev->dev, "Can't get queue-sizes.\n");
3117 + return NULL;
3118 + }
3119 +
3120 + for (i = 0; i < queue_num; i++) {
3121 + if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
3122 + || queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3123 + dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
3124 + return NULL;
3125 + }
3126 + queue_temp = queue_head + i;
3127 + queue_temp->cq = dma_alloc_coherent(&pdev->dev,
3128 + sizeof(struct fsl_qdma_ccdf) *
3129 + queue_size[i],
3130 + &queue_temp->bus_addr,
3131 + GFP_KERNEL);
3132 + if (!queue_temp->cq)
3133 + return NULL;
3134 + queue_temp->n_cq = queue_size[i];
3135 + queue_temp->id = i;
3136 + queue_temp->virt_head = queue_temp->cq;
3137 + queue_temp->virt_tail = queue_temp->cq;
3138 + /*
3139 + * The dma pool for queue command buffer
3140 + */
3141 + queue_temp->comp_pool = dma_pool_create("comp_pool",
3142 + &pdev->dev,
3143 + FSL_QDMA_BASE_BUFFER_SIZE,
3144 + 16, 0);
3145 + if (!queue_temp->comp_pool) {
3146 + dma_free_coherent(&pdev->dev,
3147 + sizeof(struct fsl_qdma_ccdf) *
3148 + queue_size[i],
3149 + queue_temp->cq,
3150 + queue_temp->bus_addr);
3151 + return NULL;
3152 + }
3153 + /*
3154 + * The dma pool for scatter/gather descriptor tables
3155 + */
3156 + queue_temp->sg_pool = dma_pool_create("sg_pool",
3157 + &pdev->dev,
3158 + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
3159 + 64, 0);
3160 + if (!queue_temp->sg_pool) {
3161 + dma_free_coherent(&pdev->dev,
3162 + sizeof(struct fsl_qdma_ccdf) *
3163 + queue_size[i],
3164 + queue_temp->cq,
3165 + queue_temp->bus_addr);
3166 + dma_pool_destroy(queue_temp->comp_pool);
3167 + return NULL;
3168 + }
3169 + /*
3170 + * List for queue command buffer
3171 + */
3172 + INIT_LIST_HEAD(&queue_temp->comp_used);
3173 + INIT_LIST_HEAD(&queue_temp->comp_free);
3174 + spin_lock_init(&queue_temp->queue_lock);
3175 + }
3176 +
3177 + return queue_head;
3178 +}
3179 +
3180 +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
3181 + struct platform_device *pdev)
3182 +{
3183 + struct device_node *np = pdev->dev.of_node;
3184 + struct fsl_qdma_queue *status_head;
3185 + unsigned int status_size;
3186 + int ret;
3187 +
3188 + ret = of_property_read_u32(np, "status-sizes", &status_size);
3189 + if (ret) {
3190 + dev_err(&pdev->dev, "Can't get status-sizes.\n");
3191 + return NULL;
3192 + }
3193 + if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
3194 + || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3195 + dev_err(&pdev->dev, "Get wrong status_size.\n");
3196 + return NULL;
3197 + }
3198 + status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
3199 + GFP_KERNEL);
3200 + if (!status_head)
3201 + return NULL;
3202 +
3203 + /*
3204 + * Buffer for queue command
3205 + */
3206 + status_head->cq = dma_alloc_coherent(&pdev->dev,
3207 + sizeof(struct fsl_qdma_ccdf) *
3208 + status_size,
3209 + &status_head->bus_addr,
3210 + GFP_KERNEL);
3211 + if (!status_head->cq)
3212 + return NULL;
3213 + status_head->n_cq = status_size;
3214 + status_head->virt_head = status_head->cq;
3215 + status_head->virt_tail = status_head->cq;
3216 + status_head->comp_pool = NULL;
3217 +
3218 + return status_head;
3219 +}
3220 +
3221 +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
3222 +{
3223 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3224 + void __iomem *block = fsl_qdma->block_base;
3225 + int i, count = 5;
3226 + u32 reg;
3227 +
3228 + /* Disable the command queue and wait for idle state. */
3229 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3230 + reg |= FSL_QDMA_DMR_DQD;
3231 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3232 + for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
3233 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
3234 +
3235 + while (1) {
3236 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
3237 + if (!(reg & FSL_QDMA_DSR_DB))
3238 + break;
3239 + if (count-- < 0)
3240 + return -EBUSY;
3241 + udelay(100);
3242 + }
3243 +
3244 + /* Disable status queue. */
3245 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
3246 +
3247 + /*
3248 + * Clear the command queue interrupt detect register for all queues.
3249 + */
3250 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3251 +
3252 + return 0;
3253 +}
3254 +
3255 +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
3256 +{
3257 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3258 + struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
3259 + struct fsl_qdma_queue *temp_queue;
3260 + struct fsl_qdma_comp *fsl_comp;
3261 + struct fsl_qdma_ccdf *status_addr;
3262 + struct fsl_qdma_csgf *csgf_src;
3263 + void __iomem *block = fsl_qdma->block_base;
3264 + u32 reg, i;
3265 + bool duplicate, duplicate_handle;
3266 +
3267 + while (1) {
3268 + duplicate = 0;
3269 + duplicate_handle = 0;
3270 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
3271 + if (reg & FSL_QDMA_BSQSR_QE)
3272 + return 0;
3273 + status_addr = fsl_status->virt_head;
3274 + if (status_addr->queue == pre_queue &&
3275 + status_addr->addr == pre_addr)
3276 + duplicate = 1;
3277 +
3278 + i = status_addr->queue;
3279 + pre_queue = status_addr->queue;
3280 + pre_addr = status_addr->addr;
3281 + temp_queue = fsl_queue + i;
3282 + spin_lock(&temp_queue->queue_lock);
3283 + if (list_empty(&temp_queue->comp_used)) {
3284 + if (duplicate)
3285 +				duplicate_handle = true;
3286 + else {
3287 + spin_unlock(&temp_queue->queue_lock);
3288 + return -1;
3289 + }
3290 + } else {
3291 + fsl_comp = list_first_entry(&temp_queue->comp_used,
3292 + struct fsl_qdma_comp,
3293 + list);
3294 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
3295 + + 2;
3296 + if (fsl_comp->bus_addr + 16 !=
3297 + (dma_addr_t)status_addr->addr) {
3298 + if (duplicate)
3299 +				duplicate_handle = true;
3300 + else {
3301 + spin_unlock(&temp_queue->queue_lock);
3302 + return -1;
3303 + }
3304 + }
3305 + }
3306 +
3307 + if (duplicate_handle) {
3308 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3309 + reg |= FSL_QDMA_BSQMR_DI;
3310 + status_addr->addr = 0x0;
3311 + fsl_status->virt_head++;
3312 + if (fsl_status->virt_head == fsl_status->cq
3313 + + fsl_status->n_cq)
3314 + fsl_status->virt_head = fsl_status->cq;
3315 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3316 + spin_unlock(&temp_queue->queue_lock);
3317 + continue;
3318 + }
3319 + list_del(&fsl_comp->list);
3320 +
3321 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3322 + reg |= FSL_QDMA_BSQMR_DI;
3323 + status_addr->addr = 0x0;
3324 + fsl_status->virt_head++;
3325 + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
3326 + fsl_status->virt_head = fsl_status->cq;
3327 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3328 + spin_unlock(&temp_queue->queue_lock);
3329 +
3330 + spin_lock(&fsl_comp->qchan->vchan.lock);
3331 + vchan_cookie_complete(&fsl_comp->vdesc);
3332 + fsl_comp->qchan->status = DMA_COMPLETE;
3333 + spin_unlock(&fsl_comp->qchan->vchan.lock);
3334 + }
3335 + return 0;
3336 +}
3337 +
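+/* Error IRQ: log any engine error reported in DEDR, then clear it. */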
3338 +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
3339 +{
3340 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3341 + unsigned int intr;
3342 + void __iomem *status = fsl_qdma->status_base;
3343 +
3344 + intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
3345 +
3346 + if (intr)
3347 + dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
3348 +
3349 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3350 + return IRQ_HANDLED;
3351 +}
3352 +
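+/*
+ * Queue IRQ: a status queue threshold event (BCQIDR[SQT]) triggers the
+ * completion drain; anything left over is treated as an error, which
+ * stops dequeueing via DMR[DQD] and masks the queue interrupts.
+ */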
3353 +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
3354 +{
3355 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3356 + unsigned int intr, reg;
3357 + void __iomem *block = fsl_qdma->block_base;
3358 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3359 +
3360 + intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
3361 +
3362 + if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
3363 + intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
3364 +
3365 + if (intr != 0) {
3366 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3367 + reg |= FSL_QDMA_DMR_DQD;
3368 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3369 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
3370 +		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status error!\n");
3371 + }
3372 +
3373 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3374 +
3375 + return IRQ_HANDLED;
3376 +}
3377 +
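+/* Look up the "qdma-error" and "qdma-queue" IRQs and hook handlers. */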
3378 +static int
3379 +fsl_qdma_irq_init(struct platform_device *pdev,
3380 + struct fsl_qdma_engine *fsl_qdma)
3381 +{
3382 + int ret;
3383 +
3384 +	fsl_qdma->error_irq = platform_get_irq_byname(pdev, "qdma-error");
3386 + if (fsl_qdma->error_irq < 0) {
3387 +		dev_err(&pdev->dev, "Can't get qdma error irq.\n");
3388 + return fsl_qdma->error_irq;
3389 + }
3390 +
3391 + fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
3392 + if (fsl_qdma->queue_irq < 0) {
3393 + dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
3394 + return fsl_qdma->queue_irq;
3395 + }
3396 +
3397 + ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
3398 + fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
3399 + if (ret) {
3400 + dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
3401 + return ret;
3402 + }
3403 + ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
3404 + fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
3405 + if (ret) {
3406 + dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
3407 + return ret;
3408 + }
3409 +
3410 + return 0;
3411 +}
3412 +
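+/*
+ * Bring the engine to a known state: halt it, point each command
+ * queue's enqueue/dequeue registers at its ring, program thresholds
+ * and ring sizes from ilog2(n_cq), apply the ERR010812 XOFF
+ * workaround, set up the status queue and interrupts, and finally
+ * clear DMR[DQD] to let the engine run.
+ */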
3413 +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
3414 +{
3415 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3416 + struct fsl_qdma_queue *temp;
3417 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3418 + void __iomem *status = fsl_qdma->status_base;
3419 + void __iomem *block = fsl_qdma->block_base;
3420 + int i, ret;
3421 + u32 reg;
3422 +
3423 + /* Try to halt the qDMA engine first. */
3424 + ret = fsl_qdma_halt(fsl_qdma);
3425 + if (ret) {
3426 +		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!\n");
3427 + return ret;
3428 + }
3429 +
3430 + /*
3431 + * Clear the command queue interrupt detect register for all queues.
3432 + */
3433 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3434 +
3435 + for (i = 0; i < fsl_qdma->n_queues; i++) {
3436 + temp = fsl_queue + i;
3437 + /*
3438 + * Initialize Command Queue registers to point to the first
3439 + * command descriptor in memory.
3440 + * Dequeue Pointer Address Registers
3441 + * Enqueue Pointer Address Registers
3442 + */
3443 + qdma_writel(fsl_qdma, temp->bus_addr,
3444 + block + FSL_QDMA_BCQDPA_SADDR(i));
3445 + qdma_writel(fsl_qdma, temp->bus_addr,
3446 + block + FSL_QDMA_BCQEPA_SADDR(i));
3447 +
3448 + /* Initialize the queue mode. */
3449 + reg = FSL_QDMA_BCQMR_EN;
3450 +		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
3451 +		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
3452 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
3453 + }
3454 +
3455 + /*
3456 + * Workaround for erratum: ERR010812.
3457 +	 * XOFF must be enabled to avoid enqueue rejections, so set
3458 +	 * SQCCMR ENTER_WM to 0x20.
3459 + */
3460 + qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
3461 + block + FSL_QDMA_SQCCMR);
3462 + /*
3463 + * Initialize status queue registers to point to the first
3464 + * command descriptor in memory.
3465 + * Dequeue Pointer Address Registers
3466 + * Enqueue Pointer Address Registers
3467 + */
3468 + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
3469 + block + FSL_QDMA_SQEPAR);
3470 + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
3471 + block + FSL_QDMA_SQDPAR);
3472 + /* Initialize status queue interrupt. */
3473 + qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
3474 + block + FSL_QDMA_BCQIER(0));
3475 + qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
3476 + | 0x8000,
3477 + block + FSL_QDMA_BSQICR);
3478 + qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
3479 + block + FSL_QDMA_CQIER);
3480 + /* Initialize controller interrupt register. */
3481 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3482 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
3483 +
3484 + /* Initialize the status queue mode. */
3485 + reg = FSL_QDMA_BSQMR_EN;
3486 +	reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq) - 6);
3487 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3488 +
3489 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3490 + reg &= ~FSL_QDMA_DMR_DQD;
3491 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3492 +
3493 + return 0;
3494 +}
3495 +
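+/*
+ * Prepare a scatter/gather copy: request a completion descriptor
+ * sized for both sg lists, fill it, and hand it to virt-dma.
+ */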
3496 +static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg(
3497 + struct dma_chan *chan,
3498 + struct scatterlist *dst_sg, unsigned int dst_nents,
3499 + struct scatterlist *src_sg, unsigned int src_nents,
3500 + unsigned long flags)
3501 +{
3502 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3503 + struct fsl_qdma_comp *fsl_comp;
3504 +
3505 +	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan,
3506 +						dst_nents,
3507 +						src_nents);
+	if (!fsl_comp)
+		return NULL;
3508 +	fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents);
3509 +
3510 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
3511 +}
3512 +
3513 +static struct dma_async_tx_descriptor *
3514 +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
3515 + dma_addr_t src, size_t len, unsigned long flags)
3516 +{
3517 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3518 + struct fsl_qdma_comp *fsl_comp;
3519 +
3520 +	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
+	if (!fsl_comp)
+		return NULL;
3521 +	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
3522 +
3523 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
3524 +}
3525 +
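+/*
+ * Copy the next pending 16-byte command descriptor into the channel's
+ * command ring, unless BCQSR reports the queue full or in XOFF, and
+ * kick the hardware by setting BCQMR[EI].
+ */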
3526 +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
3527 +{
3528 + void __iomem *block = fsl_chan->qdma->block_base;
3529 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3530 + struct fsl_qdma_comp *fsl_comp;
3531 + struct virt_dma_desc *vdesc;
3532 + u32 reg;
3533 +
3534 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
3535 + if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
3536 + return;
3537 + vdesc = vchan_next_desc(&fsl_chan->vchan);
3538 + if (!vdesc)
3539 + return;
3540 + list_del(&vdesc->node);
3541 + fsl_comp = to_fsl_qdma_comp(vdesc);
3542 +
3543 + memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
3544 + if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
3545 + fsl_queue->virt_head = fsl_queue->cq;
3546 +
3547 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
3548 + barrier();
3549 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
3550 + reg |= FSL_QDMA_BCQMR_EI;
3551 + qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
3552 + fsl_chan->status = DMA_IN_PROGRESS;
3553 +}
3554 +
3555 +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
3556 + dma_cookie_t cookie, struct dma_tx_state *txstate)
3557 +{
3558 + return dma_cookie_status(chan, cookie, txstate);
3559 +}
3560 +
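+/*
+ * virt-dma desc_free callback: return any scatter/gather blocks to
+ * sg_pool and recycle the completion descriptor onto comp_free.
+ */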
3561 +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
3562 +{
3563 + struct fsl_qdma_comp *fsl_comp;
3564 + struct fsl_qdma_queue *fsl_queue;
3565 + struct fsl_qdma_sg *sg_block;
3566 + unsigned long flags;
3567 + unsigned int i;
3568 +
3569 + fsl_comp = to_fsl_qdma_comp(vdesc);
3570 + fsl_queue = fsl_comp->qchan->queue;
3571 +
3572 + if (fsl_comp->sg_block) {
3573 + for (i = 0; i < fsl_comp->sg_block_src +
3574 + fsl_comp->sg_block_dst; i++) {
3575 + sg_block = fsl_comp->sg_block + i;
3576 + dma_pool_free(fsl_queue->sg_pool,
3577 + sg_block->virt_addr,
3578 + sg_block->bus_addr);
3579 + }
3580 + kfree(fsl_comp->sg_block);
3581 + }
3582 +
3583 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
3584 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
3585 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
3586 +}
3587 +
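+/*
+ * issue_pending: under the queue and vchan locks, move submitted
+ * descriptors to the issued list and let fsl_qdma_enqueue_desc() push
+ * the next one to the hardware command queue.
+ */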
3588 +static void fsl_qdma_issue_pending(struct dma_chan *chan)
3589 +{
3590 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3591 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3592 + unsigned long flags;
3593 +
3594 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
3595 + spin_lock(&fsl_chan->vchan.lock);
3596 + if (vchan_issue_pending(&fsl_chan->vchan))
3597 + fsl_qdma_enqueue_desc(fsl_chan);
3598 + spin_unlock(&fsl_chan->vchan.lock);
3599 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
3600 +}
3601 +
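+/*
+ * The probe below expects a device tree node along the lines of this
+ * illustrative sketch (addresses, sizes and counts are placeholders,
+ * not taken from a real board):
+ *
+ *	qdma: dma-controller@... {
+ *		compatible = "fsl,ls1021a-qdma";
+ *		reg = <... ...>,	// ctrl   (resource 0)
+ *		      <... ...>,	// status (resource 1)
+ *		      <... ...>;	// block  (resource 2)
+ *		interrupt-names = "qdma-error", "qdma-queue";
+ *		channels = <8>;
+ *		queues = <2>;
+ *		status-sizes = <64>;
+ *		big-endian;		// optional
+ *	};
+ */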
3602 +static int fsl_qdma_probe(struct platform_device *pdev)
3603 +{
3604 + struct device_node *np = pdev->dev.of_node;
3605 + struct fsl_qdma_engine *fsl_qdma;
3607 + struct resource *res;
3608 + unsigned int len, chans, queues;
3609 + int ret, i;
3610 +
3611 + ret = of_property_read_u32(np, "channels", &chans);
3612 + if (ret) {
3613 + dev_err(&pdev->dev, "Can't get channels.\n");
3614 + return ret;
3615 + }
3616 +
3617 +	len = sizeof(*fsl_qdma) + sizeof(struct fsl_qdma_chan) * chans;
3618 + fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
3619 + if (!fsl_qdma)
3620 + return -ENOMEM;
3621 +
3622 + ret = of_property_read_u32(np, "queues", &queues);
3623 + if (ret) {
3624 + dev_err(&pdev->dev, "Can't get queues.\n");
3625 + return ret;
3626 + }
3627 +
3628 + fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
3629 + if (!fsl_qdma->queue)
3630 + return -ENOMEM;
3631 +
3632 + fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
3633 + if (!fsl_qdma->status)
3634 + return -ENOMEM;
3635 +
3636 + fsl_qdma->n_chans = chans;
3637 + fsl_qdma->n_queues = queues;
3638 + mutex_init(&fsl_qdma->fsl_qdma_mutex);
3639 +
3640 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3641 + fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
3642 + if (IS_ERR(fsl_qdma->ctrl_base))
3643 + return PTR_ERR(fsl_qdma->ctrl_base);
3644 +
3645 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3646 + fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
3647 + if (IS_ERR(fsl_qdma->status_base))
3648 + return PTR_ERR(fsl_qdma->status_base);
3649 +
3650 + res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
3651 + fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
3652 + if (IS_ERR(fsl_qdma->block_base))
3653 + return PTR_ERR(fsl_qdma->block_base);
3654 +
3655 + ret = fsl_qdma_irq_init(pdev, fsl_qdma);
3656 + if (ret)
3657 + return ret;
3658 +
3659 + fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
3660 + INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
3661 + for (i = 0; i < fsl_qdma->n_chans; i++) {
3662 + struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
3663 +
3664 + fsl_chan->qdma = fsl_qdma;
3665 + fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
3666 + fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
3667 + INIT_LIST_HEAD(&fsl_chan->qcomp);
3668 + vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
3669 + }
3670 + for (i = 0; i < fsl_qdma->n_queues; i++)
3671 + fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
3672 +
3673 + dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
3674 + dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask);
3675 +
3676 + fsl_qdma->dma_dev.dev = &pdev->dev;
3677 + fsl_qdma->dma_dev.device_alloc_chan_resources
3678 + = fsl_qdma_alloc_chan_resources;
3679 + fsl_qdma->dma_dev.device_free_chan_resources
3680 + = fsl_qdma_free_chan_resources;
3681 + fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
3682 + fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
3683 + fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg;
3684 + fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
3685 +
3686 +	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret) {
+		dev_err(&pdev->dev, "Can't set DMA mask.\n");
+		return ret;
+	}
3687 +
3688 + platform_set_drvdata(pdev, fsl_qdma);
3689 +
3690 + ret = dma_async_device_register(&fsl_qdma->dma_dev);
3691 + if (ret) {
3692 + dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
3693 + return ret;
3694 + }
3695 +
3696 +	ret = fsl_qdma_reg_init(fsl_qdma);
3697 +	if (ret) {
3698 +		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
+		dma_async_device_unregister(&fsl_qdma->dma_dev);
3699 +		return ret;
3700 +	}
3701 +
3703 + return 0;
3704 +}
3705 +
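+/*
+ * Tear-down mirrors probe: unregister the dmaengine device, free every
+ * completion descriptor still on each queue's used/free lists, then
+ * release the command rings and the status ring.
+ */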
3706 +static int fsl_qdma_remove(struct platform_device *pdev)
3707 +{
3708 + struct device_node *np = pdev->dev.of_node;
3709 + struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
3710 + struct fsl_qdma_queue *queue_temp;
3711 + struct fsl_qdma_queue *status = fsl_qdma->status;
3712 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
3713 + int i;
3714 +
3715 + of_dma_controller_free(np);
3716 + dma_async_device_unregister(&fsl_qdma->dma_dev);
3717 +
3718 + /* Free descriptor areas */
3719 + for (i = 0; i < fsl_qdma->n_queues; i++) {
3720 + queue_temp = fsl_qdma->queue + i;
3721 + list_for_each_entry_safe(comp_temp, _comp_temp,
3722 + &queue_temp->comp_used, list) {
3723 + dma_pool_free(queue_temp->comp_pool,
3724 + comp_temp->virt_addr,
3725 + comp_temp->bus_addr);
3726 + list_del(&comp_temp->list);
3727 + kfree(comp_temp);
3728 + }
3729 + list_for_each_entry_safe(comp_temp, _comp_temp,
3730 + &queue_temp->comp_free, list) {
3731 + dma_pool_free(queue_temp->comp_pool,
3732 + comp_temp->virt_addr,
3733 + comp_temp->bus_addr);
3734 + list_del(&comp_temp->list);
3735 + kfree(comp_temp);
3736 + }
3737 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
3738 + queue_temp->n_cq, queue_temp->cq,
3739 + queue_temp->bus_addr);
3740 + dma_pool_destroy(queue_temp->comp_pool);
3741 + }
3742 +
3743 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
3744 + status->n_cq, status->cq, status->bus_addr);
3745 + return 0;
3746 +}
3747 +
3748 +static const struct of_device_id fsl_qdma_dt_ids[] = {
3749 + { .compatible = "fsl,ls1021a-qdma", },
3750 + { /* sentinel */ }
3751 +};
3752 +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
3753 +
3754 +static struct platform_driver fsl_qdma_driver = {
3755 + .driver = {
3756 + .name = "fsl-qdma",
3758 + .of_match_table = fsl_qdma_dt_ids,
3759 + },
3760 + .probe = fsl_qdma_probe,
3761 + .remove = fsl_qdma_remove,
3762 +};
3763 +
3764 +static int __init fsl_qdma_init(void)
3765 +{
3766 + return platform_driver_register(&fsl_qdma_driver);
3767 +}
3768 +subsys_initcall(fsl_qdma_init);
3769 +
3770 +static void __exit fsl_qdma_exit(void)
3771 +{
3772 + platform_driver_unregister(&fsl_qdma_driver);
3773 +}
3774 +module_exit(fsl_qdma_exit);
3775 +
3776 +MODULE_ALIAS("platform:fsl-qdma");
3777 +MODULE_DESCRIPTION("Freescale qDMA engine driver");
3778 +MODULE_LICENSE("GPL v2");
3779 --
3780 2.14.1
3781