layerscape: add patches-5.4
[openwrt/staging/dedeckeh.git] target/linux/layerscape/patches-5.4/701-net-0222-soc-fsl-dpio-QMAN-performance-improvement.-Function-.patch
From 5c88fa1440b2e4d0bdd46dad5370eb8c2181951b Mon Sep 17 00:00:00 2001
From: Youri Querry <youri.querry_1@nxp.com>
Date: Mon, 4 Nov 2019 11:00:24 -0500
Subject: [PATCH] soc: fsl: dpio: QMAN performance improvement. Function
 pointer indirection.

We make the access decision once, at portal initialization, and set the
function pointers accordingly, so the hot paths no longer branch on the
hardware revision.

Signed-off-by: Youri Querry <youri.querry_1@nxp.com>
---
 drivers/soc/fsl/dpio/qbman-portal.c | 455 ++++++++++++++++++++++++++++++------
 drivers/soc/fsl/dpio/qbman-portal.h | 130 ++++++++++-
 2 files changed, 508 insertions(+), 77 deletions(-)

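The core of the change, reduced to a standalone sketch: probe the hardware
revision once, bind each hot-path operation to its _direct (cached portal,
pre-5.0 QMan) or _mem_back (memory-backed portal, QMan 5.0 and later)
variant, and leave the per-frame paths with a single indirect call instead
of a revision branch. The names below (portal_init, enqueue_ptr, the 0x5000
check) are illustrative stand-ins, not the driver's identifiers:

#include <stdio.h>

/* Two implementations of one operation; the driver's analogues are
 * qbman_swp_enqueue_direct() and qbman_swp_enqueue_mem_back(). */
static int enqueue_direct(int fd)   { printf("direct %d\n", fd);   return 0; }
static int enqueue_mem_back(int fd) { printf("mem_back %d\n", fd); return 0; }

/* One pointer per operation, defaulting to the pre-5.0 path, as
 * qbman_swp_enqueue_ptr does below. */
static int (*enqueue_ptr)(int fd) = enqueue_direct;

/* The access decision is made once, at init time (a stand-in for what
 * qbman_swp_init() does in this patch). */
static void portal_init(unsigned int qman_rev)
{
	if (qman_rev >= 0x5000)
		enqueue_ptr = enqueue_mem_back;
}

int main(void)
{
	portal_init(0x5000);
	return enqueue_ptr(42);	/* hot path: one indirect call, no branch */
}

Note that the patch makes the pointers file-scope globals rather than
per-portal fields, so the decision taken in qbman_swp_init() binds every
portal; that is sound as long as all portals on a SoC expose the same
QMan revision.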
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -84,6 +84,82 @@ enum qbman_sdqcr_fc {
 	qbman_sdqcr_fc_up_to_3 = 1
 };
 
+/* Internal function declarations */
+static int qbman_swp_enqueue_direct(struct qbman_swp *s,
+				    const struct qbman_eq_desc *d,
+				    const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+				      const struct qbman_eq_desc *d,
+				      const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+					     const struct qbman_eq_desc *d,
+					     const struct dpaa2_fd *fd,
+					     uint32_t *flags,
+					     int num_frames);
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+					       const struct qbman_eq_desc *d,
+					       const struct dpaa2_fd *fd,
+					       uint32_t *flags,
+					       int num_frames);
+static int
+qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+				       const struct qbman_eq_desc *d,
+				       const struct dpaa2_fd *fd,
+				       int num_frames);
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+					     const struct qbman_eq_desc *d,
+					     const struct dpaa2_fd *fd,
+					     int num_frames);
+static int qbman_swp_pull_direct(struct qbman_swp *s,
+				 struct qbman_pull_desc *d);
+static int qbman_swp_pull_mem_back(struct qbman_swp *s,
+				   struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
+
+static int qbman_swp_release_direct(struct qbman_swp *s,
+				    const struct qbman_release_desc *d,
+				    const u64 *buffers,
+				    unsigned int num_buffers);
+static int qbman_swp_release_mem_back(struct qbman_swp *s,
+				      const struct qbman_release_desc *d,
+				      const u64 *buffers,
+				      unsigned int num_buffers);
+
+/* Function pointers */
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+			     const struct qbman_eq_desc *d,
+			     const struct dpaa2_fd *fd)
+	= qbman_swp_enqueue_direct;
+
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+				      const struct qbman_eq_desc *d,
+				      const struct dpaa2_fd *fd,
+				      uint32_t *flags,
+				      int num_frames)
+	= qbman_swp_enqueue_multiple_direct;
+
+int
+(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+				       const struct qbman_eq_desc *d,
+				       const struct dpaa2_fd *fd,
+				       int num_frames)
+	= qbman_swp_enqueue_multiple_desc_direct;
+
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
+	= qbman_swp_pull_direct;
+
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
+	= qbman_swp_dqrr_next_direct;
+
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+			     const struct qbman_release_desc *d,
+			     const u64 *buffers,
+			     unsigned int num_buffers)
+	= qbman_swp_release_direct;
+
 #define dccvac(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
 #define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
 static inline void qbman_inval_prefetch(struct qbman_swp *p, u32 offset)
@@ -227,6 +303,19 @@ struct qbman_swp *qbman_swp_init(const s
 	 * applied when dequeues from a specific channel are enabled.
 	 */
 	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+
+	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+		qbman_swp_enqueue_ptr =
+			qbman_swp_enqueue_mem_back;
+		qbman_swp_enqueue_multiple_ptr =
+			qbman_swp_enqueue_multiple_mem_back;
+		qbman_swp_enqueue_multiple_desc_ptr =
+			qbman_swp_enqueue_multiple_desc_mem_back;
+		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
+		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
+		qbman_swp_release_ptr = qbman_swp_release_mem_back;
+	}
+
 	return p;
 }
 
@@ -494,7 +583,7 @@ static inline void qbman_write_eqcr_am_r
 }
 
 /**
- * qbman_swp_enqueue() - Issue an enqueue command
+ * qbman_swp_enqueue_direct() - Issue an enqueue command
  * @s: the software portal used for enqueue
  * @d: the enqueue descriptor
  * @fd: the frame descriptor to be enqueued
@@ -504,7 +593,7 @@ static inline void qbman_write_eqcr_am_r
  *
  * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+int qbman_swp_enqueue_direct(struct qbman_swp *s, const struct qbman_eq_desc *d,
 		      const struct dpaa2_fd *fd)
 {
 	struct qbman_eq_desc_with_fd *p;
@@ -527,22 +616,58 @@ int qbman_swp_enqueue(struct qbman_swp *
 	memcpy(&p->desc.tgtid, &d->tgtid, 24);
 	memcpy(&p->fd, fd, sizeof(*fd));
 
-	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-		/* Set the verb byte, have to substitute in the valid-bit */
-		dma_wmb();
-		p->desc.verb = d->verb | EQAR_VB(eqar);
-		dccvac(p);
-	} else {
-		p->desc.verb = d->verb | EQAR_VB(eqar);
-		dma_wmb();
-		qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
-	}
+	/* Set the verb byte, have to substitute in the valid-bit */
+	dma_wmb();
+	p->desc.verb = d->verb | EQAR_VB(eqar);
+	dccvac(p);
 
 	return 0;
 }
 
 /**
- * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
+ * qbman_swp_enqueue_mem_back() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+			       const struct qbman_eq_desc *d,
+			       const struct dpaa2_fd *fd)
+{
+	struct qbman_eq_desc_with_fd *p;
+	u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
+
+	if (!EQAR_SUCCESS(eqar))
+		return -EBUSY;
+
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+	/* This is mapped as DEVICE type memory; writes are done
+	 * with each field's natural address alignment:
+	 * desc.dca address alignment = 1
+	 * desc.seqnum address alignment = 2
+	 * desc.orpid address alignment = 4
+	 * desc.tgtid address alignment = 8
+	 */
+	p->desc.dca = d->dca;
+	p->desc.seqnum = d->seqnum;
+	p->desc.orpid = d->orpid;
+	memcpy(&p->desc.tgtid, &d->tgtid, 24);
+	memcpy(&p->fd, fd, sizeof(*fd));
+
+	p->desc.verb = d->verb | EQAR_VB(eqar);
+	dma_wmb();
+	qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
+
+	return 0;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
  * using one enqueue descriptor
  * @s: the software portal used for enqueue
  * @d: the enqueue descriptor
@@ -552,16 +677,16 @@ int qbman_swp_enqueue(struct qbman_swp *
  *
  * Return the number of fd enqueued, or a negative error number.
  */
-int qbman_swp_enqueue_multiple(struct qbman_swp *s,
-			       const struct qbman_eq_desc *d,
-			       const struct dpaa2_fd *fd,
-			       uint32_t *flags,
-			       int num_frames)
+int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+				      const struct qbman_eq_desc *d,
+				      const struct dpaa2_fd *fd,
+				      uint32_t *flags,
+				      int num_frames)
 {
 	int count = 0;
 
 	while (count < num_frames) {
-		if (qbman_swp_enqueue(s, d, fd) != 0)
+		if (qbman_swp_enqueue_direct(s, d, fd) != 0)
 			break;
 		count++;
 	}
@@ -570,7 +695,35 @@ int qbman_swp_enqueue_multiple(struct qb
 }
 
 /**
- * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
+ * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of flags, not used for the moment
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+					const struct qbman_eq_desc *d,
+					const struct dpaa2_fd *fd,
+					uint32_t *flags,
+					int num_frames)
+{
+	int count = 0;
+
+	while (count < num_frames) {
+		if (qbman_swp_enqueue_mem_back(s, d, fd) != 0)
+			break;
+		count++;
+	}
+
+	return count;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
  * using multiple enqueue descriptor
  * @s: the software portal used for enqueue
  * @d: table of minimal enqueue descriptor
@@ -579,15 +732,41 @@ int qbman_swp_enqueue_multiple(struct qb
  *
  * Return the number of fd enqueued, or a negative error number.
 */
-int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
-				    const struct qbman_eq_desc *d,
-				    const struct dpaa2_fd *fd,
-				    int num_frames)
+int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+					   const struct qbman_eq_desc *d,
+					   const struct dpaa2_fd *fd,
+					   int num_frames)
 {
 	int count = 0;
 
 	while (count < num_frames) {
-		if (qbman_swp_enqueue(s, &(d[count]), fd) != 0)
+		if (qbman_swp_enqueue_direct(s, &(d[count]), fd) != 0)
+			break;
+		count++;
+	}
+
+	return count;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
+ * using multiple enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+					     const struct qbman_eq_desc *d,
+					     const struct dpaa2_fd *fd,
+					     int num_frames)
+{
+	int count = 0;
+
+	while (count < num_frames) {
+		if (qbman_swp_enqueue_mem_back(s, &(d[count]), fd) != 0)
 			break;
 		count++;
 	}
@@ -750,7 +929,7 @@ void qbman_pull_desc_set_channel(struct
 }
 
 /**
- * qbman_swp_pull() - Issue the pull dequeue command
+ * qbman_swp_pull_direct() - Issue the pull dequeue command
  * @s: the software portal object
  * @d: the software portal descriptor which has been configured with
  *     the set of qbman_pull_desc_set_*() calls
@@ -758,7 +937,7 @@ void qbman_pull_desc_set_channel(struct
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
-int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
 {
 	struct qbman_pull_desc *p;
 
@@ -776,19 +955,46 @@ int qbman_swp_pull(struct qbman_swp *s,
 	p->dq_src = d->dq_src;
 	p->rsp_addr = d->rsp_addr;
 	p->rsp_addr_virt = d->rsp_addr_virt;
+	dma_wmb();
+	/* Set the verb byte, have to substitute in the valid-bit */
+	p->verb = d->verb | s->vdq.valid_bit;
+	s->vdq.valid_bit ^= QB_VALID_BIT;
+	dccvac(p);
 
-	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-		dma_wmb();
-		/* Set the verb byte, have to substitute in the valid-bit */
-		p->verb = d->verb | s->vdq.valid_bit;
-		s->vdq.valid_bit ^= QB_VALID_BIT;
-		dccvac(p);
-	} else {
-		p->verb = d->verb | s->vdq.valid_bit;
-		s->vdq.valid_bit ^= QB_VALID_BIT;
-		dma_wmb();
-		qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+	return 0;
+}
+
+/**
+ * qbman_swp_pull_mem_back() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ *     the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+	struct qbman_pull_desc *p;
+
+	if (!atomic_dec_and_test(&s->vdq.available)) {
+		atomic_inc(&s->vdq.available);
+		return -EBUSY;
 	}
+	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+	else
+		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
+	p->numf = d->numf;
+	p->tok = QMAN_DQ_TOKEN_VALID;
+	p->dq_src = d->dq_src;
+	p->rsp_addr = d->rsp_addr;
+	p->rsp_addr_virt = d->rsp_addr_virt;
+	p->verb = d->verb | s->vdq.valid_bit;
+	s->vdq.valid_bit ^= QB_VALID_BIT;
+	dma_wmb();
+	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
 
 	return 0;
 }
@@ -796,14 +1002,14 @@ int qbman_swp_pull(struct qbman_swp *s,
 #define QMAN_DQRR_PI_MASK 0xf
 
 /**
- * qbman_swp_dqrr_next() - Get an valid DQRR entry
+ * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
  * @s: the software portal object
  *
  * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
  * only once, so repeated calls can return a sequence of DQRR entries, without
  * requiring they be consumed immediately or in any particular order.
 */
-const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
 {
 	u32 verb;
 	u32 response_verb;
@@ -845,10 +1051,97 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
 		qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
 	}
 
-	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
-		p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-	else
-		p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+	verb = p->dq.verb;
+
+	/*
+	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
+	 * in the DQRR reset bug workaround, we shouldn't need to skip this
+	 * check, because we've already determined that a new entry is available
+	 * and we've invalidated the cacheline before reading it, so the
+	 * valid-bit behaviour is repaired and should tell us what we already
+	 * knew from reading PI.
+	 */
+	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+		qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+		return NULL;
+	}
+	/*
+	 * There's something there. Move "next_idx" attention to the next ring
+	 * entry (and prefetch it) before returning what we found.
+	 */
+	s->dqrr.next_idx++;
+	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+	if (!s->dqrr.next_idx)
+		s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+	/*
+	 * If this is the final response to a volatile dequeue command
+	 * indicate that the vdq is available
+	 */
+	flags = p->dq.stat;
+	response_verb = verb & QBMAN_RESULT_MASK;
+	if ((response_verb == QBMAN_RESULT_DQ) &&
+	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
+	    (flags & DPAA2_DQ_STAT_EXPIRED))
+		atomic_inc(&s->vdq.available);
+
+	qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+
+	return p;
+}
+
+/**
+ * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
+{
+	u32 verb;
+	u32 response_verb;
+	u32 flags;
+	struct dpaa2_dq *p;
+
+	/* Before using valid-bit to detect if something is there, we have to
+	 * handle the case of the DQRR reset bug...
+	 */
+	if (unlikely(s->dqrr.reset_bug)) {
+		/*
+		 * We pick up new entries by cache-inhibited producer index,
+		 * which means that a non-coherent mapping would require us to
+		 * invalidate and read *only* once that PI has indicated that
+		 * there's an entry here. The first trip around the DQRR ring
+		 * will be much less efficient than all subsequent trips around
+		 * it...
+		 */
+		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+			QMAN_DQRR_PI_MASK;
+
+		/* there are new entries if pi != next_idx */
+		if (pi == s->dqrr.next_idx)
+			return NULL;
+
+		/*
+		 * if next_idx is/was the last ring index, and 'pi' is
+		 * different, we can disable the workaround as all the ring
+		 * entries have now been DMA'd to so valid-bit checking is
+		 * repaired. Note: this logic needs to be based on next_idx
+		 * (which increments one at a time), rather than on pi (which
+		 * can burst and wrap-around between our snapshots of it).
+		 */
+		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+				 s->dqrr.next_idx, pi);
+			s->dqrr.reset_bug = 0;
+		}
+		qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+	}
+
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
 	verb = p->dq.verb;
 
 	/*
@@ -976,7 +1269,7 @@ void qbman_release_desc_set_rcdi(struct
 #define RAR_SUCCESS(rar) ((rar) & 0x100)
 
 /**
- * qbman_swp_release() - Issue a buffer release command
+ * qbman_swp_release_direct() - Issue a buffer release command
  * @s: the software portal object
  * @d: the release descriptor
  * @buffers: a pointer pointing to the buffer address to be released
@@ -984,8 +1277,9 @@ void qbman_release_desc_set_rcdi(struct
  *
  * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
-		      const u64 *buffers, unsigned int num_buffers)
+int qbman_swp_release_direct(struct qbman_swp *s,
+			     const struct qbman_release_desc *d,
+			     const u64 *buffers, unsigned int num_buffers)
 {
 	int i;
 	struct qbman_release_desc *p;
@@ -999,29 +1293,60 @@ int qbman_swp_release(struct qbman_swp *
 		return -EBUSY;
 
 	/* Start the release command */
-	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
-		p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
-	else
-		p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
 	/* Copy the caller's buffer pointers to the command */
 	for (i = 0; i < num_buffers; i++)
 		p->buf[i] = cpu_to_le64(buffers[i]);
 	p->bpid = d->bpid;
 
-	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-		/*
-		 * Set the verb byte, have to substitute in the valid-bit
-		 * and the number of buffers.
-		 */
-		dma_wmb();
-		p->verb = d->verb | RAR_VB(rar) | num_buffers;
-		dccvac(p);
-	} else {
-		p->verb = d->verb | RAR_VB(rar) | num_buffers;
-		dma_wmb();
-		qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
-				     RAR_IDX(rar) * 4, QMAN_RT_MODE);
-	}
+	/*
+	 * Set the verb byte, have to substitute in the valid-bit
+	 * and the number of buffers.
+	 */
+	dma_wmb();
+	p->verb = d->verb | RAR_VB(rar) | num_buffers;
+	dccvac(p);
+
+	return 0;
+}
+
+/**
+ * qbman_swp_release_mem_back() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: a pointer pointing to the buffer address to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release_mem_back(struct qbman_swp *s,
+			       const struct qbman_release_desc *d,
+			       const u64 *buffers, unsigned int num_buffers)
+{
+	int i;
+	struct qbman_release_desc *p;
+	u32 rar;
+
+	if (!num_buffers || (num_buffers > 7))
+		return -EINVAL;
+
+	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+	if (!RAR_SUCCESS(rar))
+		return -EBUSY;
+
+	/* Start the release command */
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+
+	/* Copy the caller's buffer pointers to the command */
+	for (i = 0; i < num_buffers; i++)
+		p->buf[i] = cpu_to_le64(buffers[i]);
+	p->bpid = d->bpid;
+
+	p->verb = d->verb | RAR_VB(rar) | num_buffers;
+	dma_wmb();
+	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
+			     RAR_IDX(rar) * 4, QMAN_RT_MODE);
 
 	return 0;
 }
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -145,6 +145,33 @@ struct qbman_swp {
 	} dqrr;
 };
 
+/* Function pointers */
+extern
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+			     const struct qbman_eq_desc *d,
+			     const struct dpaa2_fd *fd);
+extern
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+				      const struct qbman_eq_desc *d,
+				      const struct dpaa2_fd *fd,
+				      uint32_t *flags,
+				      int num_frames);
+extern
+int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+					   const struct qbman_eq_desc *d,
+					   const struct dpaa2_fd *fd,
+					   int num_frames);
+extern
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
+extern
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
+extern
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+			     const struct qbman_release_desc *d,
+			     const u64 *buffers,
+			     unsigned int num_buffers);
+
+/* Functions */
 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
 void qbman_swp_finish(struct qbman_swp *p);
 u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
@@ -169,9 +196,6 @@ void qbman_pull_desc_set_wq(struct qbman
 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
 				 enum qbman_pull_type_e dct);
 
-int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
-
-const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
 void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
 
 int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
@@ -186,17 +210,12 @@ void qbman_eq_desc_set_fq(struct qbman_e
 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
 			  u32 qd_bin, u32 qd_prio);
 
-int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
-		      const struct dpaa2_fd *fd);
-
 int qbman_orp_drop(struct qbman_swp *s, u16 orpid, u16 seqnum);
 
 void qbman_release_desc_clear(struct qbman_release_desc *d);
 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
 
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
-		      const u64 *buffers, unsigned int num_buffers);
 int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
 		      unsigned int num_buffers);
 int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
@@ -209,18 +228,60 @@ void *qbman_swp_mc_start(struct qbman_sw
 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
 void *qbman_swp_mc_result(struct qbman_swp *p);
 
-int
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static inline int
+qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+		  const struct dpaa2_fd *fd)
+{
+	return qbman_swp_enqueue_ptr(s, d, fd);
+}
+
+/**
+ * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static inline int
 qbman_swp_enqueue_multiple(struct qbman_swp *s,
 			   const struct qbman_eq_desc *d,
 			   const struct dpaa2_fd *fd,
 			   uint32_t *flags,
-			   int num_frames);
+			   int num_frames)
+{
+	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
+}
 
-int
+/**
+ * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
+ * using multiple enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static inline int
 qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
 				const struct qbman_eq_desc *d,
 				const struct dpaa2_fd *fd,
-				int num_frames);
+				int num_frames)
+{
+	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
+}
 
 /**
  * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
@@ -533,4 +594,49 @@ int qbman_bp_query(struct qbman_swp *s,
 
 u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
 
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: a pointer pointing to the buffer address to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+static inline int qbman_swp_release(struct qbman_swp *s,
+				    const struct qbman_release_desc *d,
+				    const u64 *buffers,
+				    unsigned int num_buffers)
+{
+	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
+}
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ *     the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static inline int qbman_swp_pull(struct qbman_swp *s,
+				 struct qbman_pull_desc *d)
+{
+	return qbman_swp_pull_ptr(s, d);
+}
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+	return qbman_swp_dqrr_next_ptr(s);
+}
+
 #endif /* __FSL_QBMAN_PORTAL_H */
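A note on the header side of the change: the original entry points
(qbman_swp_enqueue(), qbman_swp_pull(), qbman_swp_dqrr_next(),
qbman_swp_release()) survive as static inline wrappers around the extern
function pointers, so existing call sites compile unchanged and the only
added cost is the indirect call. A minimal single-file sketch of that
layout, using illustrative portal_* names rather than the driver's:

#include <stdio.h>

/* "Header" half: the public name is a static inline wrapper around an
 * extern function pointer, as qbman_swp_enqueue() now dispatches through
 * qbman_swp_enqueue_ptr in qbman-portal.h. */
extern int (*portal_enqueue_ptr)(int fd);

static inline int portal_enqueue(int fd)
{
	return portal_enqueue_ptr(fd);	/* call sites keep the old name */
}

/* "C file" half: the default binding; init code may rebind the pointer,
 * as qbman_swp_init() does for QMan 5.0+ portals. */
static int enqueue_direct(int fd)
{
	printf("direct %d\n", fd);
	return 0;
}

int (*portal_enqueue_ptr)(int fd) = enqueue_direct;

int main(void)
{
	return portal_enqueue(7);
}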