layerscape: add patches-5.4
[openwrt/staging/hauke.git] target/linux/layerscape/patches-5.4/701-net-0007-fsl_qbman-SDK-DPAA-1.x-QBMan-drivers.patch
1 From 772438d01bf57bc8939f53c3101a323fc774428f Mon Sep 17 00:00:00 2001
2 From: Madalin Bucur <madalin.bucur@nxp.com>
3 Date: Wed, 10 May 2017 16:30:12 +0300
4 Subject: [PATCH] fsl_qbman: SDK DPAA 1.x QBMan drivers
5
6 Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
7 Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
8 ---
9 drivers/staging/fsl_qbman/Kconfig | 228 +
10 drivers/staging/fsl_qbman/Makefile | 28 +
11 drivers/staging/fsl_qbman/bman_config.c | 720 +++
12 drivers/staging/fsl_qbman/bman_debugfs.c | 119 +
13 drivers/staging/fsl_qbman/bman_driver.c | 559 +++
14 drivers/staging/fsl_qbman/bman_high.c | 1145 +++++
15 drivers/staging/fsl_qbman/bman_low.h | 565 +++
16 drivers/staging/fsl_qbman/bman_private.h | 166 +
17 drivers/staging/fsl_qbman/bman_test.c | 56 +
18 drivers/staging/fsl_qbman/bman_test.h | 44 +
19 drivers/staging/fsl_qbman/bman_test_high.c | 183 +
20 drivers/staging/fsl_qbman/bman_test_thresh.c | 196 +
21 drivers/staging/fsl_qbman/dpa_alloc.c | 706 +++
22 drivers/staging/fsl_qbman/dpa_sys.h | 259 ++
23 drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 +
24 drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 +
25 drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 +
26 drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 +
27 drivers/staging/fsl_qbman/fsl_usdpaa.c | 1984 ++++++++
28 drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 ++
29 drivers/staging/fsl_qbman/qbman_driver.c | 88 +
30 drivers/staging/fsl_qbman/qman_config.c | 1224 +++++
31 drivers/staging/fsl_qbman/qman_debugfs.c | 1594 +++++++
32 drivers/staging/fsl_qbman/qman_driver.c | 961 ++++
33 drivers/staging/fsl_qbman/qman_high.c | 5669 +++++++++++++++++++++++
34 drivers/staging/fsl_qbman/qman_low.h | 1427 ++++++
35 drivers/staging/fsl_qbman/qman_private.h | 398 ++
36 drivers/staging/fsl_qbman/qman_test.c | 57 +
37 drivers/staging/fsl_qbman/qman_test.h | 45 +
38 drivers/staging/fsl_qbman/qman_test_high.c | 216 +
39 drivers/staging/fsl_qbman/qman_test_hotpotato.c | 502 ++
40 drivers/staging/fsl_qbman/qman_utility.c | 129 +
41 include/linux/fsl_bman.h | 532 +++
42 include/linux/fsl_qman.h | 3888 ++++++++++++++++
43 include/linux/fsl_usdpaa.h | 372 ++
44 35 files changed, 24695 insertions(+)
45 create mode 100644 drivers/staging/fsl_qbman/Kconfig
46 create mode 100644 drivers/staging/fsl_qbman/Makefile
47 create mode 100644 drivers/staging/fsl_qbman/bman_config.c
48 create mode 100644 drivers/staging/fsl_qbman/bman_debugfs.c
49 create mode 100644 drivers/staging/fsl_qbman/bman_driver.c
50 create mode 100644 drivers/staging/fsl_qbman/bman_high.c
51 create mode 100644 drivers/staging/fsl_qbman/bman_low.h
52 create mode 100644 drivers/staging/fsl_qbman/bman_private.h
53 create mode 100644 drivers/staging/fsl_qbman/bman_test.c
54 create mode 100644 drivers/staging/fsl_qbman/bman_test.h
55 create mode 100644 drivers/staging/fsl_qbman/bman_test_high.c
56 create mode 100644 drivers/staging/fsl_qbman/bman_test_thresh.c
57 create mode 100644 drivers/staging/fsl_qbman/dpa_alloc.c
58 create mode 100644 drivers/staging/fsl_qbman/dpa_sys.h
59 create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm.h
60 create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm64.h
61 create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc32.h
62 create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc64.h
63 create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa.c
64 create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
65 create mode 100644 drivers/staging/fsl_qbman/qbman_driver.c
66 create mode 100644 drivers/staging/fsl_qbman/qman_config.c
67 create mode 100644 drivers/staging/fsl_qbman/qman_debugfs.c
68 create mode 100644 drivers/staging/fsl_qbman/qman_driver.c
69 create mode 100644 drivers/staging/fsl_qbman/qman_high.c
70 create mode 100644 drivers/staging/fsl_qbman/qman_low.h
71 create mode 100644 drivers/staging/fsl_qbman/qman_private.h
72 create mode 100644 drivers/staging/fsl_qbman/qman_test.c
73 create mode 100644 drivers/staging/fsl_qbman/qman_test.h
74 create mode 100644 drivers/staging/fsl_qbman/qman_test_high.c
75 create mode 100644 drivers/staging/fsl_qbman/qman_test_hotpotato.c
76 create mode 100644 drivers/staging/fsl_qbman/qman_utility.c
77 create mode 100644 include/linux/fsl_bman.h
78 create mode 100644 include/linux/fsl_qman.h
79 create mode 100644 include/linux/fsl_usdpaa.h
80
81 --- /dev/null
82 +++ b/drivers/staging/fsl_qbman/Kconfig
83 @@ -0,0 +1,228 @@
84 +config FSL_SDK_DPA
85 + bool "Freescale Datapath Queue and Buffer management"
86 + depends on !FSL_DPAA
87 + select FSL_QMAN_FQ_LOOKUP if PPC64
88 + select FSL_QMAN_FQ_LOOKUP if ARM64
89 +
90 +
91 +menu "Freescale Datapath QMan/BMan options"
92 + depends on FSL_SDK_DPA
93 +
94 +config FSL_DPA_CHECKING
95 + bool "additional driver checking"
96 + default n
97 + ---help---
98 + Compiles in additional checks to sanity-check the drivers and any
99 + use of it by other code. Not recommended for performance.
100 +
101 +config FSL_DPA_CAN_WAIT
102 + bool
103 + default y
104 +
105 +config FSL_DPA_CAN_WAIT_SYNC
106 + bool
107 + default y
108 +
109 +config FSL_DPA_PIRQ_FAST
110 + bool
111 + default y
112 +
113 +config FSL_DPA_PIRQ_SLOW
114 + bool
115 + default y
116 +
117 +config FSL_DPA_PORTAL_SHARE
118 + bool
119 + default y
120 +
121 +config FSL_SDK_BMAN
122 + bool "Freescale Buffer Manager (BMan) support"
123 + default y
124 +
125 +if FSL_SDK_BMAN
126 +
127 +config FSL_BMAN_CONFIG
128 + bool "BMan device management"
129 + default y
130 + ---help---
131 + If this linux image is running natively, you need this option. If this
132 + linux image is running as a guest OS under the hypervisor, only one
133 + guest OS ("the control plane") needs this option.
134 +
135 +config FSL_BMAN_TEST
136 + tristate "BMan self-tests"
137 + default n
138 + ---help---
139 + This option compiles self-test code for BMan.
140 +
141 +config FSL_BMAN_TEST_HIGH
142 + bool "BMan high-level self-test"
143 + depends on FSL_BMAN_TEST
144 + default y
145 + ---help---
146 + This requires the presence of cpu-affine portals, and performs
147 + high-level API testing with them (whichever portal(s) are affine to
148 + the cpu(s) the test executes on).
149 +
150 +config FSL_BMAN_TEST_THRESH
151 + bool "BMan threshold test"
152 + depends on FSL_BMAN_TEST
153 + default y
154 + ---help---
155 + Multi-threaded (SMP) test of BMan pool depletion. A pool is seeded
156 + before multiple threads (one per cpu) create pool objects to track
157 + depletion state changes. The pool is then drained to empty by a
158 +	  "drainer" thread, and the other threads verify that they observe
159 +	  exactly the depletion state changes that are expected.
160 +
161 +config FSL_BMAN_DEBUGFS
162 + tristate "BMan debugfs interface"
163 + depends on DEBUG_FS
164 + default y
165 + ---help---
166 + This option compiles debugfs code for BMan.
167 +
168 +endif # FSL_SDK_BMAN
169 +
170 +config FSL_SDK_QMAN
171 + bool "Freescale Queue Manager (QMan) support"
172 + default y
173 +
174 +if FSL_SDK_QMAN
175 +
176 +config FSL_QMAN_POLL_LIMIT
177 + int
178 + default 32
179 +
180 +config FSL_QMAN_CONFIG
181 + bool "QMan device management"
182 + default y
183 + ---help---
184 + If this linux image is running natively, you need this option. If this
185 + linux image is running as a guest OS under the hypervisor, only one
186 + guest OS ("the control plane") needs this option.
187 +
188 +config FSL_QMAN_TEST
189 + tristate "QMan self-tests"
190 + default n
191 + ---help---
192 + This option compiles self-test code for QMan.
193 +
194 +config FSL_QMAN_TEST_STASH_POTATO
195 + bool "QMan 'hot potato' data-stashing self-test"
196 + depends on FSL_QMAN_TEST
197 + default y
198 + ---help---
199 + This performs a "hot potato" style test enqueuing/dequeuing a frame
200 + across a series of FQs scheduled to different portals (and cpus), with
201 + DQRR, data and context stashing always on.
202 +
203 +config FSL_QMAN_TEST_HIGH
204 + bool "QMan high-level self-test"
205 + depends on FSL_QMAN_TEST
206 + default y
207 + ---help---
208 + This requires the presence of cpu-affine portals, and performs
209 + high-level API testing with them (whichever portal(s) are affine to
210 + the cpu(s) the test executes on).
211 +
212 +config FSL_QMAN_DEBUGFS
213 + tristate "QMan debugfs interface"
214 + depends on DEBUG_FS
215 + default y
216 + ---help---
217 + This option compiles debugfs code for QMan.
218 +
219 +# H/w settings that can be hard-coded for now.
220 +config FSL_QMAN_FQD_SZ
221 + int "size of Frame Queue Descriptor region"
222 + default 10
223 + ---help---
224 + This is the size of the FQD region defined as: PAGE_SIZE * (2^value)
225 + ex: 10 => PAGE_SIZE * (2^10)
226 +	  Note: Default device-trees now require a minimum Kconfig setting of 10.
227 +
228 +config FSL_QMAN_PFDR_SZ
229 + int "size of the PFDR pool"
230 + default 13
231 + ---help---
232 + This is the size of the PFDR pool defined as: PAGE_SIZE * (2^value)
233 + ex: 13 => PAGE_SIZE * (2^13)
234 +
235 +# Corenet initiator settings. Stash request queues are 4-deep to match cores'
236 +# ability to snarf. Stash priority is 3, other priorities are 2.
237 +config FSL_QMAN_CI_SCHED_CFG_SRCCIV
238 + int
239 + depends on FSL_QMAN_CONFIG
240 + default 4
241 +config FSL_QMAN_CI_SCHED_CFG_SRQ_W
242 + int
243 + depends on FSL_QMAN_CONFIG
244 + default 3
245 +config FSL_QMAN_CI_SCHED_CFG_RW_W
246 + int
247 + depends on FSL_QMAN_CONFIG
248 + default 2
249 +config FSL_QMAN_CI_SCHED_CFG_BMAN_W
250 + int
251 + depends on FSL_QMAN_CONFIG
252 + default 2
253 +
254 +# portal interrupt settings
255 +config FSL_QMAN_PIRQ_DQRR_ITHRESH
256 + int
257 + default 12
258 +config FSL_QMAN_PIRQ_MR_ITHRESH
259 + int
260 + default 4
261 +config FSL_QMAN_PIRQ_IPERIOD
262 + int
263 + default 100
264 +
265 +# 64 bit kernel support
266 +config FSL_QMAN_FQ_LOOKUP
267 + bool
268 + default n
269 +
270 +config QMAN_CEETM_UPDATE_PERIOD
271 + int "Token update period for shaping, in nanoseconds"
272 + default 1000
273 + ---help---
274 + Traffic shaping works by performing token calculations (using
275 + credits) on shaper instances periodically. This update period
276 + sets the granularity for how often those token rate credit
277 + updates are performed, and thus determines the accuracy and
278 + range of traffic rates that can be configured by users. The
279 + reference manual recommends a 1 microsecond period as providing
280 + a good balance between granularity and range.
281 +
282 + Unless you know what you are doing, leave this value at its default.
283 +
284 +config FSL_QMAN_INIT_TIMEOUT
285 + int "timeout for qman init stage, in seconds"
286 + default 10
287 + ---help---
288 +	  The timeout setting to quit the initialization loop for the
289 +	  non-control partition in case the control partition fails to boot up.
290 +
291 +endif # FSL_SDK_QMAN
292 +
293 +config FSL_USDPAA
294 + bool "Freescale USDPAA process driver"
295 + depends on FSL_SDK_DPA
296 + default y
297 + ---help---
298 + This driver provides user-space access to kernel-managed
299 + resource interfaces for USDPAA applications, on the assumption
300 + that each process will open this device once. Specifically, this
301 + device exposes functionality that would be awkward if exposed
302 + via the portal devices - ie. this device exposes functionality
303 + that is inherently process-wide rather than portal-specific.
304 + This device is necessary for obtaining access to DMA memory and
305 + for allocation of Qman and Bman resources. In short, if you wish
306 + to use USDPAA applications, you need this.
307 +
308 + If unsure, say Y.
309 +
310 +
311 +endmenu
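
A worked example of the sizing arithmetic described in the FSL_QMAN_FQD_SZ and
FSL_QMAN_PFDR_SZ help text above, as a standalone sketch (the 4 KiB page size is
an assumption; the kernel itself uses the generated CONFIG_ values directly):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;   /* assumption: 4 KiB pages   */
	unsigned long fqd_exp   = 10;     /* default FSL_QMAN_FQD_SZ   */
	unsigned long pfdr_exp  = 13;     /* default FSL_QMAN_PFDR_SZ  */

	/* region size = PAGE_SIZE * (2^value) = PAGE_SIZE << value */
	printf("FQD region: %lu MiB\n", (page_size << fqd_exp) >> 20);  /* 4  */
	printf("PFDR pool:  %lu MiB\n", (page_size << pfdr_exp) >> 20); /* 32 */
	return 0;
}
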
312 --- /dev/null
313 +++ b/drivers/staging/fsl_qbman/Makefile
314 @@ -0,0 +1,28 @@
315 +subdir-ccflags-y := -Werror
316 +
317 +# Common
318 +obj-$(CONFIG_FSL_SDK_DPA) += dpa_alloc.o
319 +obj-$(CONFIG_FSL_SDK_DPA) += qbman_driver.o
320 +
321 +# Bman
322 +obj-$(CONFIG_FSL_SDK_BMAN) += bman_high.o
323 +obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o
324 +obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o
325 +obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o
326 +bman_tester-y = bman_test.o
327 +bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o
328 +bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o
329 +bman_debugfs_interface-y = bman_debugfs.o
330 +
331 +# Qman
332 +obj-$(CONFIG_FSL_SDK_QMAN) += qman_high.o qman_utility.o
333 +obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o
334 +obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o
335 +qman_tester-y = qman_test.o
336 +qman_tester-$(CONFIG_FSL_QMAN_TEST_STASH_POTATO) += qman_test_hotpotato.o
337 +qman_tester-$(CONFIG_FSL_QMAN_TEST_HIGH) += qman_test_high.o
338 +obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o
339 +qman_debugfs_interface-y = qman_debugfs.o
340 +
341 +# USDPAA
342 +obj-$(CONFIG_FSL_USDPAA) += fsl_usdpaa.o fsl_usdpaa_irq.o
343 --- /dev/null
344 +++ b/drivers/staging/fsl_qbman/bman_config.c
345 @@ -0,0 +1,720 @@
346 +/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc.
347 + *
348 + * Redistribution and use in source and binary forms, with or without
349 + * modification, are permitted provided that the following conditions are met:
350 + * * Redistributions of source code must retain the above copyright
351 + * notice, this list of conditions and the following disclaimer.
352 + * * Redistributions in binary form must reproduce the above copyright
353 + * notice, this list of conditions and the following disclaimer in the
354 + * documentation and/or other materials provided with the distribution.
355 + * * Neither the name of Freescale Semiconductor nor the
356 + * names of its contributors may be used to endorse or promote products
357 + * derived from this software without specific prior written permission.
358 + *
359 + *
360 + * ALTERNATIVELY, this software may be distributed under the terms of the
361 + * GNU General Public License ("GPL") as published by the Free Software
362 + * Foundation, either version 2 of that License or (at your option) any
363 + * later version.
364 + *
365 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
366 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
367 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
368 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
369 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
370 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
371 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
372 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
373 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
374 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
375 + */
376 +
377 +#include <asm/cacheflush.h>
378 +#include "bman_private.h"
379 +#include <linux/of_reserved_mem.h>
380 +
381 +/* Last updated for v00.79 of the BG */
382 +
383 +struct bman;
384 +
385 +/* Register offsets */
386 +#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04))
387 +#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04))
388 +#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
389 +#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
390 +#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
391 +#define REG_FBPR_FPC 0x0800
392 +#define REG_STATE_IDLE 0x960
393 +#define REG_STATE_STOP 0x964
394 +#define REG_ECSR 0x0a00
395 +#define REG_ECIR 0x0a04
396 +#define REG_EADR 0x0a08
397 +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
398 +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
399 +#define REG_IP_REV_1 0x0bf8
400 +#define REG_IP_REV_2 0x0bfc
401 +#define REG_FBPR_BARE 0x0c00
402 +#define REG_FBPR_BAR 0x0c04
403 +#define REG_FBPR_AR 0x0c10
404 +#define REG_SRCIDR 0x0d04
405 +#define REG_LIODNR 0x0d08
406 +#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */
407 +
408 +/* Used by all error interrupt registers except 'inhibit' */
409 +#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
410 +#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
411 +#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
412 +#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
413 +#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
414 +
415 +/* BMAN_ECIR valid error bit */
416 +#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI)
417 +
418 +union bman_ecir {
419 + u32 ecir_raw;
420 + struct {
421 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
422 + u32 __reserved1:4;
423 + u32 portal_num:4;
424 + u32 __reserved2:12;
425 + u32 numb:4;
426 + u32 __reserved3:2;
427 + u32 pid:6;
428 +#else
429 + u32 pid:6;
430 + u32 __reserved3:2;
431 + u32 numb:4;
432 + u32 __reserved2:12;
433 + u32 portal_num:4;
434 + u32 __reserved1:4;
435 +#endif
436 + } __packed info;
437 +};
438 +
439 +union bman_eadr {
440 + u32 eadr_raw;
441 + struct {
442 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
443 + u32 __reserved1:5;
444 + u32 memid:3;
445 + u32 __reserved2:14;
446 + u32 eadr:10;
447 +#else
448 + u32 eadr:10;
449 + u32 __reserved2:14;
450 + u32 memid:3;
451 + u32 __reserved1:5;
452 +#endif
453 + } __packed info;
454 +};
455 +
456 +struct bman_hwerr_txt {
457 + u32 mask;
458 + const char *txt;
459 +};
460 +
461 +#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
462 +
463 +static const struct bman_hwerr_txt bman_hwerr_txts[] = {
464 + BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
465 + BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
466 + BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
467 + BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
468 + BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
469 +};
470 +#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt))
471 +
472 +struct bman_error_info_mdata {
473 + u16 addr_mask;
474 + u16 bits;
475 + const char *txt;
476 +};
477 +
478 +#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
479 +static const struct bman_error_info_mdata error_mdata[] = {
480 + BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
481 + BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
482 + BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
483 +};
484 +#define BMAN_ERR_MDATA_COUNT \
485 + (sizeof(error_mdata)/sizeof(struct bman_error_info_mdata))
486 +
487 +/* Add this in Kconfig */
488 +#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
489 +
490 +/**
491 + * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
492 + * @v: for accessors that write values, this is the 32-bit value
493 + *
494 + * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
495 + * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
496 + * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
497 + * "write the enable register" rather than "enable the write register"!
498 + */
499 +#define bm_err_isr_status_read(bm) \
500 + __bm_err_isr_read(bm, bm_isr_status)
501 +#define bm_err_isr_status_clear(bm, m) \
502 + __bm_err_isr_write(bm, bm_isr_status, m)
503 +#define bm_err_isr_enable_read(bm) \
504 + __bm_err_isr_read(bm, bm_isr_enable)
505 +#define bm_err_isr_enable_write(bm, v) \
506 + __bm_err_isr_write(bm, bm_isr_enable, v)
507 +#define bm_err_isr_disable_read(bm) \
508 + __bm_err_isr_read(bm, bm_isr_disable)
509 +#define bm_err_isr_disable_write(bm, v) \
510 + __bm_err_isr_write(bm, bm_isr_disable, v)
511 +#define bm_err_isr_inhibit(bm) \
512 + __bm_err_isr_write(bm, bm_isr_inhibit, 1)
513 +#define bm_err_isr_uninhibit(bm) \
514 + __bm_err_isr_write(bm, bm_isr_inhibit, 0)
515 +
516 +/*
517 + * TODO: unimplemented registers
518 + *
519 + * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
520 + * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
521 + */
522 +
523 +/* Encapsulate "struct bman *" as a cast of the register space address. */
524 +
525 +static struct bman *bm_create(void *regs)
526 +{
527 + return (struct bman *)regs;
528 +}
529 +
530 +static inline u32 __bm_in(struct bman *bm, u32 offset)
531 +{
532 + return in_be32((void *)bm + offset);
533 +}
534 +static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
535 +{
536 + out_be32((void *)bm + offset, val);
537 +}
538 +#define bm_in(reg) __bm_in(bm, REG_##reg)
539 +#define bm_out(reg, val) __bm_out(bm, REG_##reg, val)
540 +
541 +static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
542 +{
543 + return __bm_in(bm, REG_ERR_ISR + (n << 2));
544 +}
545 +
546 +static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
547 +{
548 + __bm_out(bm, REG_ERR_ISR + (n << 2), val);
549 +}
550 +
551 +static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
552 +{
553 + u32 v = bm_in(IP_REV_1);
554 + *id = (v >> 16);
555 + *major = (v >> 8) & 0xff;
556 + *minor = v & 0xff;
557 +}
558 +
559 +static u32 __generate_thresh(u32 val, int roundup)
560 +{
561 + u32 e = 0; /* co-efficient, exponent */
562 + int oddbit = 0;
563 + while (val > 0xff) {
564 + oddbit = val & 1;
565 + val >>= 1;
566 + e++;
567 + if (roundup && oddbit)
568 + val++;
569 + }
570 + DPA_ASSERT(e < 0x10);
571 + return val | (e << 8);
572 +}
573 +
574 +static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
575 + u32 hwdet, u32 hwdxt)
576 +{
577 + DPA_ASSERT(pool < bman_pool_max);
578 + bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
579 + bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
580 + bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
581 + bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
582 +}
583 +
584 +static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
585 +{
586 + u32 exp = ilog2(size);
587 + /* choke if size isn't within range */
588 + DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
589 + is_power_of_2(size));
590 + /* choke if '[e]ba' has lower-alignment than 'size' */
591 + DPA_ASSERT(!(ba & (size - 1)));
592 + bm_out(FBPR_BARE, upper_32_bits(ba));
593 + bm_out(FBPR_BAR, lower_32_bits(ba));
594 + bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
595 +}
596 +
597 +/*****************/
598 +/* Config driver */
599 +/*****************/
600 +
601 +/* TODO: Kconfig these? */
602 +#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12)
603 +
604 +/* We support only one of these. */
605 +static struct bman *bm;
606 +static struct device_node *bm_node;
607 +
608 +/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used
609 + * during bman_init_ccsr(). */
610 +static dma_addr_t fbpr_a;
611 +static size_t fbpr_sz = DEFAULT_FBPR_SZ;
612 +
613 +static int bman_fbpr(struct reserved_mem *rmem)
614 +{
615 + fbpr_a = rmem->base;
616 + fbpr_sz = rmem->size;
617 +
618 + WARN_ON(!(fbpr_a && fbpr_sz));
619 +
620 + return 0;
621 +}
622 +RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
623 +
624 +static int __init fsl_bman_init(struct device_node *node)
625 +{
626 + struct resource res;
627 + u32 __iomem *regs;
628 + const char *s;
629 + int ret, standby = 0;
630 + u16 id;
631 + u8 major, minor;
632 +
633 + ret = of_address_to_resource(node, 0, &res);
634 + if (ret) {
635 + pr_err("Can't get %s property 'reg'\n",
636 + node->full_name);
637 + return ret;
638 + }
639 + s = of_get_property(node, "fsl,hv-claimable", &ret);
640 + if (s && !strcmp(s, "standby"))
641 + standby = 1;
642 + /* Global configuration */
643 + regs = ioremap(res.start, res.end - res.start + 1);
644 + bm = bm_create(regs);
645 + BUG_ON(!bm);
646 + bm_node = node;
647 + bm_get_version(bm, &id, &major, &minor);
648 + pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor);
649 + if ((major == 1) && (minor == 0)) {
650 + bman_ip_rev = BMAN_REV10;
651 + bman_pool_max = 64;
652 + } else if ((major == 2) && (minor == 0)) {
653 + bman_ip_rev = BMAN_REV20;
654 + bman_pool_max = 8;
655 + } else if ((major == 2) && (minor == 1)) {
656 + bman_ip_rev = BMAN_REV21;
657 + bman_pool_max = 64;
658 + } else {
659 + pr_warn("unknown Bman version, default to rev1.0\n");
660 + }
661 +
662 + if (standby) {
663 + pr_info(" -> in standby mode\n");
664 + return 0;
665 + }
666 + return 0;
667 +}
668 +
669 +int bman_have_ccsr(void)
670 +{
671 + return bm ? 1 : 0;
672 +}
673 +
674 +int bm_pool_set(u32 bpid, const u32 *thresholds)
675 +{
676 + if (!bm)
677 + return -ENODEV;
678 + bm_set_pool(bm, bpid, thresholds[0],
679 + thresholds[1], thresholds[2],
680 + thresholds[3]);
681 + return 0;
682 +}
683 +EXPORT_SYMBOL(bm_pool_set);
684 +
685 +__init int bman_init_early(void)
686 +{
687 + struct device_node *dn;
688 + int ret;
689 +
690 + for_each_compatible_node(dn, NULL, "fsl,bman") {
691 + if (bm)
692 + pr_err("%s: only one 'fsl,bman' allowed\n",
693 + dn->full_name);
694 + else {
695 + if (!of_device_is_available(dn))
696 + continue;
697 +
698 + ret = fsl_bman_init(dn);
699 + BUG_ON(ret);
700 + }
701 + }
702 + return 0;
703 +}
704 +postcore_initcall_sync(bman_init_early);
705 +
706 +
707 +static void log_edata_bits(u32 bit_count)
708 +{
709 + u32 i, j, mask = 0xffffffff;
710 +
711 + pr_warn("Bman ErrInt, EDATA:\n");
712 + i = bit_count/32;
713 + if (bit_count%32) {
714 + i++;
715 + mask = ~(mask << bit_count%32);
716 + }
717 + j = 16-i;
718 + pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask);
719 + j++;
720 + for (; j < 16; j++)
721 + pr_warn(" 0x%08x\n", bm_in(EDATA(j)));
722 +}
723 +
724 +static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
725 +{
726 + union bman_ecir ecir_val;
727 + union bman_eadr eadr_val;
728 +
729 + ecir_val.ecir_raw = bm_in(ECIR);
730 + /* Is portal info valid */
731 + if (ecsr_val & PORTAL_ECSR_ERR) {
732 + pr_warn("Bman ErrInt: SWP id %d, numb %d, pid %d\n",
733 + ecir_val.info.portal_num, ecir_val.info.numb,
734 + ecir_val.info.pid);
735 + }
736 + if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
737 + eadr_val.eadr_raw = bm_in(EADR);
738 + pr_warn("Bman ErrInt: EADR Memory: %s, 0x%x\n",
739 + error_mdata[eadr_val.info.memid].txt,
740 + error_mdata[eadr_val.info.memid].addr_mask
741 + & eadr_val.info.eadr);
742 + log_edata_bits(error_mdata[eadr_val.info.memid].bits);
743 + }
744 +}
745 +
746 +/* Bman interrupt handler */
747 +static irqreturn_t bman_isr(int irq, void *ptr)
748 +{
749 + u32 isr_val, ier_val, ecsr_val, isr_mask, i;
750 +
751 + ier_val = bm_err_isr_enable_read(bm);
752 + isr_val = bm_err_isr_status_read(bm);
753 + ecsr_val = bm_in(ECSR);
754 + isr_mask = isr_val & ier_val;
755 +
756 + if (!isr_mask)
757 + return IRQ_NONE;
758 + for (i = 0; i < BMAN_HWE_COUNT; i++) {
759 + if (bman_hwerr_txts[i].mask & isr_mask) {
760 + pr_warn("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt);
761 + if (bman_hwerr_txts[i].mask & ecsr_val) {
762 + log_additional_error_info(isr_mask, ecsr_val);
763 + /* Re-arm error capture registers */
764 + bm_out(ECSR, ecsr_val);
765 + }
766 + if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
767 + pr_devel("Bman un-enabling error 0x%x\n",
768 + bman_hwerr_txts[i].mask);
769 + ier_val &= ~bman_hwerr_txts[i].mask;
770 + bm_err_isr_enable_write(bm, ier_val);
771 + }
772 + }
773 + }
774 + bm_err_isr_status_clear(bm, isr_val);
775 + return IRQ_HANDLED;
776 +}
777 +
778 +static int __bind_irq(void)
779 +{
780 + int ret, err_irq;
781 +
782 + err_irq = of_irq_to_resource(bm_node, 0, NULL);
783 + if (err_irq == 0) {
784 + pr_info("Can't get %s property '%s'\n", bm_node->full_name,
785 + "interrupts");
786 + return -ENODEV;
787 + }
788 + ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node);
789 + if (ret) {
790 + pr_err("request_irq() failed %d for '%s'\n", ret,
791 + bm_node->full_name);
792 + return -ENODEV;
793 + }
794 + /* Disable Buffer Pool State Change */
795 + bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
796 + /* Write-to-clear any stale bits, (eg. starvation being asserted prior
797 + * to resource allocation during driver init). */
798 + bm_err_isr_status_clear(bm, 0xffffffff);
799 + /* Enable Error Interrupts */
800 + bm_err_isr_enable_write(bm, 0xffffffff);
801 + return 0;
802 +}
803 +
804 +int bman_init_ccsr(struct device_node *node)
805 +{
806 + int ret;
807 + if (!bman_have_ccsr())
808 + return 0;
809 + if (node != bm_node)
810 + return -EINVAL;
811 + /* FBPR memory */
812 + bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
813 + pr_info("bman-fbpr addr %pad size 0x%zx\n", &fbpr_a, fbpr_sz);
814 +
815 + ret = __bind_irq();
816 + if (ret)
817 + return ret;
818 + return 0;
819 +}
820 +
821 +u32 bm_pool_free_buffers(u32 bpid)
822 +{
823 + return bm_in(POOL_CONTENT(bpid));
824 +}
825 +
826 +#ifdef CONFIG_SYSFS
827 +
828 +#define DRV_NAME "fsl-bman"
829 +#define SBEC_MAX_ID 1
830 +#define SBEC_MIN_ID 0
831 +
832 +static ssize_t show_fbpr_fpc(struct device *dev,
833 + struct device_attribute *dev_attr, char *buf)
834 +{
835 + return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
836 +};
837 +
838 +static ssize_t show_pool_count(struct device *dev,
839 + struct device_attribute *dev_attr, char *buf)
840 +{
841 + u32 data;
842 + int i;
843 +
844 + if (!sscanf(dev_attr->attr.name, "%d", &i) || (i >= bman_pool_max))
845 + return -EINVAL;
846 + data = bm_in(POOL_CONTENT(i));
847 + return snprintf(buf, PAGE_SIZE, "%d\n", data);
848 +};
849 +
850 +static ssize_t show_err_isr(struct device *dev,
851 + struct device_attribute *dev_attr, char *buf)
852 +{
853 + return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
854 +};
855 +
856 +static ssize_t show_sbec(struct device *dev,
857 + struct device_attribute *dev_attr, char *buf)
858 +{
859 + int i;
860 +
861 + if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
862 + return -EINVAL;
863 + if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
864 + return -EINVAL;
865 + return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
866 +};
867 +
868 +static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
869 +static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
870 +
871 +/* Didn't use DEVICE_ATTR as 64 of them would be required.
872 + * Initialize them when needed. */
873 +static char *name_attrs_pool_count; /* "xx" + null-terminator */
874 +static struct device_attribute *dev_attr_buffer_pool_count;
875 +
876 +static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
877 +static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
878 +
879 +static struct attribute *bman_dev_attributes[] = {
880 + &dev_attr_fbpr_fpc.attr,
881 + &dev_attr_err_isr.attr,
882 + NULL
883 +};
884 +
885 +static struct attribute *bman_dev_ecr_attributes[] = {
886 + &dev_attr_sbec_0.attr,
887 + &dev_attr_sbec_1.attr,
888 + NULL
889 +};
890 +
891 +static struct attribute **bman_dev_pool_count_attributes;
892 +
893 +
894 +/* root level */
895 +static const struct attribute_group bman_dev_attr_grp = {
896 + .name = NULL,
897 + .attrs = bman_dev_attributes
898 +};
899 +static const struct attribute_group bman_dev_ecr_grp = {
900 + .name = "error_capture",
901 + .attrs = bman_dev_ecr_attributes
902 +};
903 +static struct attribute_group bman_dev_pool_countent_grp = {
904 + .name = "pool_count",
905 +};
906 +
907 +static int of_fsl_bman_remove(struct platform_device *ofdev)
908 +{
909 + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
910 + return 0;
911 +};
912 +
913 +static int of_fsl_bman_probe(struct platform_device *ofdev)
914 +{
915 + int ret, i;
916 +
917 + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
918 + if (ret)
919 + goto done;
920 + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
921 + if (ret)
922 + goto del_group_0;
923 +
924 + name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3,
925 + GFP_KERNEL);
926 + if (!name_attrs_pool_count) {
927 + pr_err("Can't alloc name_attrs_pool_count\n");
928 + goto del_group_1;
929 + }
930 +
931 + dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) *
932 + bman_pool_max, GFP_KERNEL);
933 + if (!dev_attr_buffer_pool_count) {
934 + pr_err("Can't alloc dev_attr-buffer_pool_count\n");
935 + goto del_group_2;
936 + }
937 +
938 + bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) *
939 + (bman_pool_max + 1), GFP_KERNEL);
940 + if (!bman_dev_pool_count_attributes) {
941 + pr_err("can't alloc bman_dev_pool_count_attributes\n");
942 + goto del_group_3;
943 + }
944 +
945 + for (i = 0; i < bman_pool_max; i++) {
946 + ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
947 + if (!ret)
948 + goto del_group_4;
949 + dev_attr_buffer_pool_count[i].attr.name =
950 + (name_attrs_pool_count + i * 3);
951 + dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
952 + dev_attr_buffer_pool_count[i].show = show_pool_count;
953 + bman_dev_pool_count_attributes[i] =
954 + &dev_attr_buffer_pool_count[i].attr;
955 + sysfs_attr_init(bman_dev_pool_count_attributes[i]);
956 + }
957 + bman_dev_pool_count_attributes[bman_pool_max] = NULL;
958 +
959 + bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes;
960 +
961 + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp);
962 + if (ret)
963 + goto del_group_4;
964 +
965 + goto done;
966 +
967 +del_group_4:
968 + kfree(bman_dev_pool_count_attributes);
969 +del_group_3:
970 + kfree(dev_attr_buffer_pool_count);
971 +del_group_2:
972 + kfree(name_attrs_pool_count);
973 +del_group_1:
974 + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
975 +del_group_0:
976 + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
977 +done:
978 + if (ret)
979 + dev_err(&ofdev->dev,
980 + "Cannot create dev attributes ret=%d\n", ret);
981 + return ret;
982 +};
983 +
984 +static struct of_device_id of_fsl_bman_ids[] = {
985 + {
986 + .compatible = "fsl,bman",
987 + },
988 + {}
989 +};
990 +MODULE_DEVICE_TABLE(of, of_fsl_bman_ids);
991 +
992 +#ifdef CONFIG_SUSPEND
993 +static u32 saved_isdr;
994 +
995 +static int bman_pm_suspend_noirq(struct device *dev)
996 +{
997 + uint32_t idle_state;
998 +
999 + suspend_unused_bportal();
1000 + /* save isdr, disable all, clear isr */
1001 + saved_isdr = bm_err_isr_disable_read(bm);
1002 + bm_err_isr_disable_write(bm, 0xffffffff);
1003 + bm_err_isr_status_clear(bm, 0xffffffff);
1004 +
1005 + if (bman_ip_rev < BMAN_REV21) {
1006 +#ifdef CONFIG_PM_DEBUG
1007 + pr_info("Bman version doesn't have STATE_IDLE\n");
1008 +#endif
1009 + return 0;
1010 + }
1011 + idle_state = bm_in(STATE_IDLE);
1012 + if (!(idle_state & 0x1)) {
1013 + pr_err("Bman not idle 0x%x aborting\n", idle_state);
1014 + bm_err_isr_disable_write(bm, saved_isdr);
1015 + resume_unused_bportal();
1016 + return -EBUSY;
1017 + }
1018 +#ifdef CONFIG_PM_DEBUG
1019 + pr_info("Bman suspend code, IDLE_STAT = 0x%x\n", idle_state);
1020 +#endif
1021 + return 0;
1022 +}
1023 +
1024 +static int bman_pm_resume_noirq(struct device *dev)
1025 +{
1026 + /* restore isdr */
1027 + bm_err_isr_disable_write(bm, saved_isdr);
1028 + resume_unused_bportal();
1029 + return 0;
1030 +}
1031 +#else
1032 +#define bman_pm_suspend_noirq NULL
1033 +#define bman_pm_resume_noirq NULL
1034 +#endif
1035 +
1036 +static const struct dev_pm_ops bman_pm_ops = {
1037 + .suspend_noirq = bman_pm_suspend_noirq,
1038 + .resume_noirq = bman_pm_resume_noirq,
1039 +};
1040 +
1041 +static struct platform_driver of_fsl_bman_driver = {
1042 + .driver = {
1043 + .owner = THIS_MODULE,
1044 + .name = DRV_NAME,
1045 + .of_match_table = of_fsl_bman_ids,
1046 + .pm = &bman_pm_ops,
1047 + },
1048 + .probe = of_fsl_bman_probe,
1049 + .remove = of_fsl_bman_remove,
1050 +};
1051 +
1052 +static int bman_ctrl_init(void)
1053 +{
1054 + return platform_driver_register(&of_fsl_bman_driver);
1055 +}
1056 +
1057 +static void bman_ctrl_exit(void)
1058 +{
1059 + platform_driver_unregister(&of_fsl_bman_driver);
1060 +}
1061 +
1062 +module_init(bman_ctrl_init);
1063 +module_exit(bman_ctrl_exit);
1064 +
1065 +#endif /* CONFIG_SYSFS */
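
A standalone sketch of the mantissa/exponent packing performed by
__generate_thresh() above: bits [7:0] carry the mantissa and bits [11:8] the
exponent, so the encoded threshold is roughly mantissa << exponent, with the
round-up path making sure shifted-out bits are not silently dropped:

#include <assert.h>

static unsigned int thresh_encode(unsigned int val, int roundup)
{
	unsigned int e = 0;   /* exponent */

	while (val > 0xff) {
		int oddbit = val & 1;

		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;        /* round up instead of losing the bit */
	}
	assert(e < 0x10);
	return val | (e << 8);
}

int main(void)
{
	/* 0x1000 encodes as mantissa 0x80, exponent 5: 0x80 << 5 == 0x1000 */
	return thresh_encode(0x1000, 0) == 0x580 ? 0 : 1;
}
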
1066 --- /dev/null
1067 +++ b/drivers/staging/fsl_qbman/bman_debugfs.c
1068 @@ -0,0 +1,119 @@
1069 +/* Copyright 2010-2011 Freescale Semiconductor, Inc.
1070 + *
1071 + * Redistribution and use in source and binary forms, with or without
1072 + * modification, are permitted provided that the following conditions are met:
1073 + * * Redistributions of source code must retain the above copyright
1074 + * notice, this list of conditions and the following disclaimer.
1075 + * * Redistributions in binary form must reproduce the above copyright
1076 + * notice, this list of conditions and the following disclaimer in the
1077 + * documentation and/or other materials provided with the distribution.
1078 + * * Neither the name of Freescale Semiconductor nor the
1079 + * names of its contributors may be used to endorse or promote products
1080 + * derived from this software without specific prior written permission.
1081 + *
1082 + *
1083 + * ALTERNATIVELY, this software may be distributed under the terms of the
1084 + * GNU General Public License ("GPL") as published by the Free Software
1085 + * Foundation, either version 2 of that License or (at your option) any
1086 + * later version.
1087 + *
1088 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1089 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1090 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1091 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1092 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1093 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1094 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1095 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1096 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1097 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1098 + */
1099 +#include <linux/module.h>
1100 +#include <linux/fsl_bman.h>
1101 +#include <linux/debugfs.h>
1102 +#include <linux/seq_file.h>
1103 +#include <linux/uaccess.h>
1104 +
1105 +static struct dentry *dfs_root; /* debugfs root directory */
1106 +
1107 +/*******************************************************************************
1108 + * Query Buffer Pool State
1109 + ******************************************************************************/
1110 +static int query_bp_state_show(struct seq_file *file, void *offset)
1111 +{
1112 + int ret;
1113 + struct bm_pool_state state;
1114 + int i, j;
1115 + u32 mask;
1116 +
1117 + memset(&state, 0, sizeof(struct bm_pool_state));
1118 + ret = bman_query_pools(&state);
1119 + if (ret) {
1120 + seq_printf(file, "Error %d\n", ret);
1121 + return 0;
1122 + }
1123 + seq_puts(file, "bp_id free_buffers_avail bp_depleted\n");
1124 + for (i = 0; i < 2; i++) {
1125 + mask = 0x80000000;
1126 + for (j = 0; j < 32; j++) {
1127 + seq_printf(file,
1128 + " %-2u %-3s %-3s\n",
1129 + (i*32)+j,
1130 + (state.as.state.__state[i] & mask) ? "no" : "yes",
1131 + (state.ds.state.__state[i] & mask) ? "yes" : "no");
1132 + mask >>= 1;
1133 + }
1134 + }
1135 + return 0;
1136 +}
1137 +
1138 +static int query_bp_state_open(struct inode *inode, struct file *file)
1139 +{
1140 + return single_open(file, query_bp_state_show, NULL);
1141 +}
1142 +
1143 +static const struct file_operations query_bp_state_fops = {
1144 + .owner = THIS_MODULE,
1145 + .open = query_bp_state_open,
1146 + .read = seq_read,
1147 + .release = single_release,
1148 +};
1149 +
1150 +static int __init bman_debugfs_module_init(void)
1151 +{
1152 + int ret = 0;
1153 + struct dentry *d;
1154 +
1155 + dfs_root = debugfs_create_dir("bman", NULL);
1156 +
1157 + if (dfs_root == NULL) {
1158 + ret = -ENOMEM;
1159 + pr_err("Cannot create bman debugfs dir\n");
1160 + goto _return;
1161 + }
1162 + d = debugfs_create_file("query_bp_state",
1163 + S_IRUGO,
1164 + dfs_root,
1165 + NULL,
1166 + &query_bp_state_fops);
1167 + if (d == NULL) {
1168 + ret = -ENOMEM;
1169 + pr_err("Cannot create query_bp_state\n");
1170 + goto _return;
1171 + }
1172 + return 0;
1173 +
1174 +_return:
1175 + debugfs_remove_recursive(dfs_root);
1176 + return ret;
1177 +}
1178 +
1179 +static void __exit bman_debugfs_module_exit(void)
1180 +{
1181 + debugfs_remove_recursive(dfs_root);
1182 +}
1183 +
1184 +
1185 +module_init(bman_debugfs_module_init);
1186 +module_exit(bman_debugfs_module_exit);
1187 +MODULE_LICENSE("Dual BSD/GPL");
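
With FSL_BMAN_DEBUGFS enabled, the query_bp_state file created above can be read
straight from user space; a minimal sketch, assuming debugfs is mounted at the
conventional /sys/kernel/debug location:

#include <stdio.h>

int main(void)
{
	/* Path assumes the usual debugfs mount point. */
	FILE *f = fopen("/sys/kernel/debug/bman/query_bp_state", "r");
	char line[128];

	if (!f) {
		perror("query_bp_state");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);  /* bp_id, free buffers available, depleted? */
	fclose(f);
	return 0;
}
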
1188 --- /dev/null
1189 +++ b/drivers/staging/fsl_qbman/bman_driver.c
1190 @@ -0,0 +1,559 @@
1191 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
1192 + *
1193 + * Redistribution and use in source and binary forms, with or without
1194 + * modification, are permitted provided that the following conditions are met:
1195 + * * Redistributions of source code must retain the above copyright
1196 + * notice, this list of conditions and the following disclaimer.
1197 + * * Redistributions in binary form must reproduce the above copyright
1198 + * notice, this list of conditions and the following disclaimer in the
1199 + * documentation and/or other materials provided with the distribution.
1200 + * * Neither the name of Freescale Semiconductor nor the
1201 + * names of its contributors may be used to endorse or promote products
1202 + * derived from this software without specific prior written permission.
1203 + *
1204 + *
1205 + * ALTERNATIVELY, this software may be distributed under the terms of the
1206 + * GNU General Public License ("GPL") as published by the Free Software
1207 + * Foundation, either version 2 of that License or (at your option) any
1208 + * later version.
1209 + *
1210 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1211 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1212 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1213 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1214 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1215 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1216 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1217 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1218 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1219 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1220 + */
1221 +#include "bman_low.h"
1222 +#ifdef CONFIG_HOTPLUG_CPU
1223 +#include <linux/cpu.h>
1224 +#endif
1225 +/*
1226 + * Global variables of the max portal/pool number this bman version supported
1227 + */
1228 +u16 bman_ip_rev;
1229 +EXPORT_SYMBOL(bman_ip_rev);
1230 +u16 bman_pool_max;
1231 +EXPORT_SYMBOL(bman_pool_max);
1232 +static u16 bman_portal_max;
1233 +
1234 +/* After initialising cpus that own shared portal configs, we cache the
1235 + * resulting portals (ie. not just the configs) in this array. Then we
1236 + * initialise slave cpus that don't have their own portals, redirecting them to
1237 + * portals from this cache in a round-robin assignment. */
1238 +static struct bman_portal *shared_portals[NR_CPUS];
1239 +static int num_shared_portals;
1240 +static int shared_portals_idx;
1241 +static LIST_HEAD(unused_pcfgs);
1242 +static DEFINE_SPINLOCK(unused_pcfgs_lock);
1243 +static void *affine_bportals[NR_CPUS];
1244 +
1245 +static int __init fsl_bpool_init(struct device_node *node)
1246 +{
1247 + int ret;
1248 + u32 *thresh, *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret);
1249 + if (!bpid || (ret != 4)) {
1250 + pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name);
1251 + return -ENODEV;
1252 + }
1253 + thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret);
1254 + if (thresh) {
1255 + if (ret != 16) {
1256 + pr_err("Invalid %s property '%s'\n",
1257 + node->full_name, "fsl,bpool-thresholds");
1258 + return -ENODEV;
1259 + }
1260 + }
1261 + if (thresh) {
1262 +#ifdef CONFIG_FSL_BMAN_CONFIG
1263 + ret = bm_pool_set(be32_to_cpu(*bpid), thresh);
1264 + if (ret)
1265 + pr_err("No CCSR node for %s property '%s'\n",
1266 + node->full_name, "fsl,bpool-thresholds");
1267 + return ret;
1268 +#else
1269 + pr_err("Ignoring %s property '%s', no CCSR support\n",
1270 + node->full_name, "fsl,bpool-thresholds");
1271 +#endif
1272 + }
1273 + return 0;
1274 +}
1275 +
1276 +static int __init fsl_bpid_range_init(struct device_node *node)
1277 +{
1278 + int ret;
1279 + u32 *range = (u32 *)of_get_property(node, "fsl,bpid-range", &ret);
1280 + if (!range) {
1281 + pr_err("No 'fsl,bpid-range' property in node %s\n",
1282 + node->full_name);
1283 + return -EINVAL;
1284 + }
1285 + if (ret != 8) {
1286 + pr_err("'fsl,bpid-range' is not a 2-cell range in node %s\n",
1287 + node->full_name);
1288 + return -EINVAL;
1289 + }
1290 + bman_seed_bpid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
1291 + pr_info("Bman: BPID allocator includes range %d:%d\n",
1292 + be32_to_cpu(range[0]), be32_to_cpu(range[1]));
1293 + return 0;
1294 +}
1295 +
1296 +static struct bm_portal_config * __init parse_pcfg(struct device_node *node)
1297 +{
1298 + struct bm_portal_config *pcfg;
1299 + const u32 *index;
1300 + int irq, ret;
1301 + resource_size_t len;
1302 +
1303 + pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
1304 + if (!pcfg) {
1305 + pr_err("can't allocate portal config");
1306 + return NULL;
1307 + }
1308 +
1309 + if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
1310 + of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
1311 + bman_ip_rev = BMAN_REV10;
1312 + bman_pool_max = 64;
1313 + bman_portal_max = 10;
1314 + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
1315 + of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
1316 + bman_ip_rev = BMAN_REV20;
1317 + bman_pool_max = 8;
1318 + bman_portal_max = 3;
1319 + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0")) {
1320 + bman_ip_rev = BMAN_REV21;
1321 + bman_pool_max = 64;
1322 + bman_portal_max = 50;
1323 + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.1")) {
1324 + bman_ip_rev = BMAN_REV21;
1325 + bman_pool_max = 64;
1326 + bman_portal_max = 25;
1327 + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.2")) {
1328 + bman_ip_rev = BMAN_REV21;
1329 + bman_pool_max = 64;
1330 + bman_portal_max = 18;
1331 + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
1332 + bman_ip_rev = BMAN_REV21;
1333 + bman_pool_max = 64;
1334 + bman_portal_max = 10;
1335 + } else {
1336 +		pr_warn("unknown BMan version in portal node, "
1337 + "default to rev1.0\n");
1338 + bman_ip_rev = BMAN_REV10;
1339 + bman_pool_max = 64;
1340 + bman_portal_max = 10;
1341 + }
1342 +
1343 + ret = of_address_to_resource(node, DPA_PORTAL_CE,
1344 + &pcfg->addr_phys[DPA_PORTAL_CE]);
1345 + if (ret) {
1346 + pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
1347 + goto err;
1348 + }
1349 + ret = of_address_to_resource(node, DPA_PORTAL_CI,
1350 + &pcfg->addr_phys[DPA_PORTAL_CI]);
1351 + if (ret) {
1352 + pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
1353 + goto err;
1354 + }
1355 +
1356 + index = of_get_property(node, "cell-index", &ret);
1357 + if (!index || (ret != 4)) {
1358 + pr_err("Can't get %s property '%s'\n", node->full_name,
1359 + "cell-index");
1360 + goto err;
1361 + }
1362 + if (be32_to_cpu(*index) >= bman_portal_max) {
1363 + pr_err("BMan portal cell index %d out of range, max %d\n",
1364 + be32_to_cpu(*index), bman_portal_max);
1365 + goto err;
1366 + }
1367 +
1368 + pcfg->public_cfg.cpu = -1;
1369 +
1370 + irq = irq_of_parse_and_map(node, 0);
1371 + if (irq == 0) {
1372 + pr_err("Can't get %s property 'interrupts'\n", node->full_name);
1373 + goto err;
1374 + }
1375 + pcfg->public_cfg.irq = irq;
1376 + pcfg->public_cfg.index = be32_to_cpu(*index);
1377 + bman_depletion_fill(&pcfg->public_cfg.mask);
1378 +
1379 + len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
1380 + if (len != (unsigned long)len)
1381 + goto err;
1382 +
1383 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
1384 + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
1385 + pcfg->addr_phys[DPA_PORTAL_CE].start,
1386 + resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
1387 + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
1388 + pcfg->addr_phys[DPA_PORTAL_CI].start,
1389 + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
1390 +
1391 +#else
1392 + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
1393 + pcfg->addr_phys[DPA_PORTAL_CE].start,
1394 + (unsigned long)len,
1395 + 0);
1396 + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
1397 + pcfg->addr_phys[DPA_PORTAL_CI].start,
1398 + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
1399 + _PAGE_GUARDED | _PAGE_NO_CACHE);
1400 +#endif
1401 + /* disable bp depletion */
1402 + __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(0));
1403 + __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(1));
1404 + return pcfg;
1405 +err:
1406 + kfree(pcfg);
1407 + return NULL;
1408 +}
1409 +
1410 +static struct bm_portal_config *get_pcfg(struct list_head *list)
1411 +{
1412 + struct bm_portal_config *pcfg;
1413 + if (list_empty(list))
1414 + return NULL;
1415 + pcfg = list_entry(list->prev, struct bm_portal_config, list);
1416 + list_del(&pcfg->list);
1417 + return pcfg;
1418 +}
1419 +
1420 +static struct bm_portal_config *get_pcfg_idx(struct list_head *list,
1421 + uint32_t idx)
1422 +{
1423 + struct bm_portal_config *pcfg;
1424 + if (list_empty(list))
1425 + return NULL;
1426 + list_for_each_entry(pcfg, list, list) {
1427 + if (pcfg->public_cfg.index == idx) {
1428 + list_del(&pcfg->list);
1429 + return pcfg;
1430 + }
1431 + }
1432 + return NULL;
1433 +}
1434 +
1435 +struct bm_portal_config *bm_get_unused_portal(void)
1436 +{
1437 + return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
1438 +}
1439 +
1440 +struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx)
1441 +{
1442 + struct bm_portal_config *ret;
1443 + spin_lock(&unused_pcfgs_lock);
1444 + if (idx == QBMAN_ANY_PORTAL_IDX)
1445 + ret = get_pcfg(&unused_pcfgs);
1446 + else
1447 + ret = get_pcfg_idx(&unused_pcfgs, idx);
1448 + spin_unlock(&unused_pcfgs_lock);
1449 + return ret;
1450 +}
1451 +
1452 +void bm_put_unused_portal(struct bm_portal_config *pcfg)
1453 +{
1454 + spin_lock(&unused_pcfgs_lock);
1455 + list_add(&pcfg->list, &unused_pcfgs);
1456 + spin_unlock(&unused_pcfgs_lock);
1457 +}
1458 +
1459 +static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
1460 +{
1461 + struct bman_portal *p;
1462 + p = bman_create_affine_portal(pcfg);
1463 + if (p) {
1464 +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
1465 + bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
1466 +#endif
1467 + pr_info("Bman portal %sinitialised, cpu %d\n",
1468 + pcfg->public_cfg.is_shared ? "(shared) " : "",
1469 + pcfg->public_cfg.cpu);
1470 + affine_bportals[pcfg->public_cfg.cpu] = p;
1471 + } else
1472 + pr_crit("Bman portal failure on cpu %d\n",
1473 + pcfg->public_cfg.cpu);
1474 + return p;
1475 +}
1476 +
1477 +static void init_slave(int cpu)
1478 +{
1479 + struct bman_portal *p;
1480 + p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
1481 + if (!p)
1482 + pr_err("Bman slave portal failure on cpu %d\n", cpu);
1483 + else
1484 + pr_info("Bman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
1485 + if (shared_portals_idx >= num_shared_portals)
1486 + shared_portals_idx = 0;
1487 + affine_bportals[cpu] = p;
1488 +}
1489 +
1490 +/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
1491 + * parsing is in dpa_sys.h. The syntax is a comma-separated list of indexes
1492 + * and/or ranges of indexes, with each being optionally prefixed by "s" to
1493 + * explicitly mark it or them for sharing.
1494 + * Eg;
1495 + * bportals=s0,1-3,s4
1496 + * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
1497 + * portals, and any remaining cpus share the portals that are assigned to cpus 0
1498 + * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
1499 + * cpu 0's portal, cpu 6 would share cpu 4's portal, and cpu 7 would share cpu
1500 + * 0's portal.) */
1501 +static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
1502 +static struct cpumask want_shared __initdata; /* cpus requested with "s" */
1503 +
1504 +static int __init parse_bportals(char *str)
1505 +{
1506 + return parse_portals_bootarg(str, &want_shared, &want_unshared,
1507 + "bportals");
1508 +}
1509 +__setup("bportals=", parse_bportals);
1510 +
1511 +static int bman_offline_cpu(unsigned int cpu)
1512 +{
1513 + struct bman_portal *p;
1514 + const struct bm_portal_config *pcfg;
1515 + p = (struct bman_portal *)affine_bportals[cpu];
1516 + if (p) {
1517 + pcfg = bman_get_bm_portal_config(p);
1518 + if (pcfg)
1519 + irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
1520 + }
1521 + return 0;
1522 +}
1523 +
1524 +#ifdef CONFIG_HOTPLUG_CPU
1525 +static int bman_online_cpu(unsigned int cpu)
1526 +{
1527 + struct bman_portal *p;
1528 + const struct bm_portal_config *pcfg;
1529 + p = (struct bman_portal *)affine_bportals[cpu];
1530 + if (p) {
1531 + pcfg = bman_get_bm_portal_config(p);
1532 + if (pcfg)
1533 + irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
1534 + }
1535 + return 0;
1536 +}
1537 +#endif /* CONFIG_HOTPLUG_CPU */
1538 +
1539 +/* Initialise the Bman driver. The meat of this function deals with portals. The
1540 + * following describes the flow of portal-handling, the code "steps" refer to
1541 + * this description;
1542 + * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
1543 + * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
1544 + * bound).
1545 + * 2. The "want_shared" and "want_unshared" lists (as filled by the
1546 + * "bportals=[...]" bootarg) are processed, allocating portals and assigning
1547 + * them to cpus, placing them in the relevant list and setting ::cpu as
1548 + *    appropriate. If no "bportals" bootarg was present, the default is to try to
1549 + * assign portals to all online cpus at the time of driver initialisation.
1550 + * Any failure to allocate portals (when parsing the "want" lists or when
1551 + * using default behaviour) will be silently tolerated (the "fixup" logic in
1552 + * step 3 will determine what happens in this case).
1553 + * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
1554 + * sharing and sharing is required (because not all cpus have been assigned
1555 + *    portals), then one portal will be marked for sharing. Conversely, if no
1556 + * sharing is required, any portals marked for sharing will not be shared. It
1557 + * may be that sharing occurs when it wasn't expected, if portal allocation
1558 + * failed to honour all the requested assignments (including the default
1559 + * assignments if no bootarg is present).
1560 + * 4. Unshared portals are initialised on their respective cpus.
1561 + * 5. Shared portals are initialised on their respective cpus.
1562 + * 6. Each remaining cpu is initialised to slave to one of the shared portals,
1563 + * which are selected in a round-robin fashion.
1564 + * Any portal configs left unused are available for USDPAA allocation.
1565 + */
1566 +__init int bman_init(void)
1567 +{
1568 + struct cpumask slave_cpus;
1569 + struct cpumask unshared_cpus = *cpu_none_mask;
1570 + struct cpumask shared_cpus = *cpu_none_mask;
1571 + LIST_HEAD(unshared_pcfgs);
1572 + LIST_HEAD(shared_pcfgs);
1573 + struct device_node *dn;
1574 + struct bm_portal_config *pcfg;
1575 + struct bman_portal *p;
1576 + int cpu, ret;
1577 + struct cpumask offline_cpus;
1578 +
1579 + /* Initialise the Bman (CCSR) device */
1580 + for_each_compatible_node(dn, NULL, "fsl,bman") {
1581 + if (!bman_init_ccsr(dn))
1582 + pr_info("Bman err interrupt handler present\n");
1583 + else
1584 + pr_err("Bman CCSR setup failed\n");
1585 + }
1586 + /* Initialise any declared buffer pools */
1587 + for_each_compatible_node(dn, NULL, "fsl,bpool") {
1588 + ret = fsl_bpool_init(dn);
1589 + if (ret)
1590 + return ret;
1591 + }
1592 + /* Step 1. See comments at the beginning of the file. */
1593 + for_each_compatible_node(dn, NULL, "fsl,bman-portal") {
1594 + if (!of_device_is_available(dn))
1595 + continue;
1596 + pcfg = parse_pcfg(dn);
1597 + if (pcfg)
1598 + list_add_tail(&pcfg->list, &unused_pcfgs);
1599 + }
1600 + /* Step 2. */
1601 + for_each_possible_cpu(cpu) {
1602 + if (cpumask_test_cpu(cpu, &want_shared)) {
1603 + pcfg = get_pcfg(&unused_pcfgs);
1604 + if (!pcfg)
1605 + break;
1606 + pcfg->public_cfg.cpu = cpu;
1607 + list_add_tail(&pcfg->list, &shared_pcfgs);
1608 + cpumask_set_cpu(cpu, &shared_cpus);
1609 + }
1610 + if (cpumask_test_cpu(cpu, &want_unshared)) {
1611 + if (cpumask_test_cpu(cpu, &shared_cpus))
1612 + continue;
1613 + pcfg = get_pcfg(&unused_pcfgs);
1614 + if (!pcfg)
1615 + break;
1616 + pcfg->public_cfg.cpu = cpu;
1617 + list_add_tail(&pcfg->list, &unshared_pcfgs);
1618 + cpumask_set_cpu(cpu, &unshared_cpus);
1619 + }
1620 + }
1621 + if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
1622 + /* Default, give an unshared portal to each online cpu */
1623 + for_each_online_cpu(cpu) {
1624 + pcfg = get_pcfg(&unused_pcfgs);
1625 + if (!pcfg)
1626 + break;
1627 + pcfg->public_cfg.cpu = cpu;
1628 + list_add_tail(&pcfg->list, &unshared_pcfgs);
1629 + cpumask_set_cpu(cpu, &unshared_cpus);
1630 + }
1631 + }
1632 + /* Step 3. */
1633 + cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
1634 + cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
1635 + if (cpumask_empty(&slave_cpus)) {
1636 + /* No sharing required */
1637 + if (!list_empty(&shared_pcfgs)) {
1638 + /* Migrate "shared" to "unshared" */
1639 + cpumask_or(&unshared_cpus, &unshared_cpus,
1640 + &shared_cpus);
1641 + cpumask_clear(&shared_cpus);
1642 + list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
1643 + INIT_LIST_HEAD(&shared_pcfgs);
1644 + }
1645 + } else {
1646 + /* Sharing required */
1647 + if (list_empty(&shared_pcfgs)) {
1648 + /* Migrate one "unshared" to "shared" */
1649 + pcfg = get_pcfg(&unshared_pcfgs);
1650 + if (!pcfg) {
1651 + pr_crit("No BMan portals available!\n");
1652 + return 0;
1653 + }
1654 + cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
1655 + cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
1656 + list_add_tail(&pcfg->list, &shared_pcfgs);
1657 + }
1658 + }
1659 + /* Step 4. */
1660 + list_for_each_entry(pcfg, &unshared_pcfgs, list) {
1661 + pcfg->public_cfg.is_shared = 0;
1662 + p = init_pcfg(pcfg);
1663 + if (!p) {
1664 + pr_crit("Unable to initialize bman portal\n");
1665 + return 0;
1666 + }
1667 + }
1668 + /* Step 5. */
1669 + list_for_each_entry(pcfg, &shared_pcfgs, list) {
1670 + pcfg->public_cfg.is_shared = 1;
1671 + p = init_pcfg(pcfg);
1672 + if (p)
1673 + shared_portals[num_shared_portals++] = p;
1674 + }
1675 + /* Step 6. */
1676 + if (!cpumask_empty(&slave_cpus))
1677 + for_each_cpu(cpu, &slave_cpus)
1678 + init_slave(cpu);
1679 + pr_info("Bman portals initialised\n");
1680 + cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
1681 + for_each_cpu(cpu, &offline_cpus)
1682 + bman_offline_cpu(cpu);
1683 +#ifdef CONFIG_HOTPLUG_CPU
1684 + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
1685 + "soc/qbman_portal:online",
1686 + bman_online_cpu, bman_offline_cpu);
1687 + if (ret < 0) {
1688 + pr_err("bman: failed to register hotplug callbacks.\n");
1689 + return 0;
1690 + }
1691 +#endif
1692 + return 0;
1693 +}
1694 +
1695 +__init int bman_resource_init(void)
1696 +{
1697 + struct device_node *dn;
1698 + int ret;
1699 +
1700 + /* Initialise BPID allocation ranges */
1701 + for_each_compatible_node(dn, NULL, "fsl,bpid-range") {
1702 + ret = fsl_bpid_range_init(dn);
1703 + if (ret)
1704 + return ret;
1705 + }
1706 + return 0;
1707 +}
1708 +
1709 +#ifdef CONFIG_SUSPEND
1710 +void suspend_unused_bportal(void)
1711 +{
1712 + struct bm_portal_config *pcfg;
1713 +
1714 + if (list_empty(&unused_pcfgs))
1715 + return;
1716 +
1717 + list_for_each_entry(pcfg, &unused_pcfgs, list) {
1718 +#ifdef CONFIG_PM_DEBUG
1719 + pr_info("Need to save bportal %d\n", pcfg->public_cfg.index);
1720 +#endif
1721 + /* save isdr, disable all via isdr, clear isr */
1722 + pcfg->saved_isdr =
1723 + __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
1724 + __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
1725 + 0xe08);
1726 + __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
1727 + 0xe00);
1728 + }
1729 + return;
1730 +}
1731 +
1732 +void resume_unused_bportal(void)
1733 +{
1734 + struct bm_portal_config *pcfg;
1735 +
1736 + if (list_empty(&unused_pcfgs))
1737 + return;
1738 +
1739 + list_for_each_entry(pcfg, &unused_pcfgs, list) {
1740 +#ifdef CONFIG_PM_DEBUG
1741 + pr_info("Need to resume bportal %d\n", pcfg->public_cfg.index);
1742 +#endif
1743 + /* restore isdr */
1744 + __raw_writel(pcfg->saved_isdr,
1745 + pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
1746 + }
1747 + return;
1748 +}
1749 +#endif
1750 --- /dev/null
1751 +++ b/drivers/staging/fsl_qbman/bman_high.c
1752 @@ -0,0 +1,1145 @@
1753 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
1754 + *
1755 + * Redistribution and use in source and binary forms, with or without
1756 + * modification, are permitted provided that the following conditions are met:
1757 + * * Redistributions of source code must retain the above copyright
1758 + * notice, this list of conditions and the following disclaimer.
1759 + * * Redistributions in binary form must reproduce the above copyright
1760 + * notice, this list of conditions and the following disclaimer in the
1761 + * documentation and/or other materials provided with the distribution.
1762 + * * Neither the name of Freescale Semiconductor nor the
1763 + * names of its contributors may be used to endorse or promote products
1764 + * derived from this software without specific prior written permission.
1765 + *
1766 + *
1767 + * ALTERNATIVELY, this software may be distributed under the terms of the
1768 + * GNU General Public License ("GPL") as published by the Free Software
1769 + * Foundation, either version 2 of that License or (at your option) any
1770 + * later version.
1771 + *
1772 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1773 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1774 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1775 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1776 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1777 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1778 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1779 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1780 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1781 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1782 + */
1783 +
1784 +#include "bman_low.h"
1785 +
1786 +/* Compilation constants */
1787 +#define RCR_THRESH 2 /* reread h/w CI when running out of space */
1788 +#define IRQNAME "BMan portal %d"
1789 +#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
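+/* ("BMan portal " is 12 characters; 16 bytes leaves room for a cpu number of
+ * up to three digits plus the terminating NUL.) */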
1790 +
1791 +struct bman_portal {
1792 + struct bm_portal p;
1793 + /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
1794 + struct bman_depletion *pools;
1795 + int thresh_set;
1796 + unsigned long irq_sources;
1797 + u32 slowpoll; /* only used when interrupts are off */
1798 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
1799 + struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
1800 +#endif
1801 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
1802 + raw_spinlock_t sharing_lock; /* only used if is_shared */
1803 + int is_shared;
1804 + struct bman_portal *sharing_redirect;
1805 +#endif
1806 + /* When the cpu-affine portal is activated, this is non-NULL */
1807 + const struct bm_portal_config *config;
1808 + /* This is needed for power management */
1809 + struct platform_device *pdev;
1810 + /* 64-entry hash-table of pool objects that are tracking depletion
1811 + * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
1812 + * we're not fussy about cache-misses and so forth - whereas the above
1813 + * members should all fit in one cacheline.
1814 + * BTW, with 64 entries in the hash table and 64 buffer pools to track,
1815 + * you'll never guess the hash-function ... */
1816 + struct bman_pool *cb[64];
1817 + char irqname[MAX_IRQNAME];
1818 + /* Track if the portal was alloced by the driver */
1819 + u8 alloced;
1820 + /* power management data */
1821 + u32 save_isdr;
1822 +};
1823 +
1824 +/* For an explanation of the locking, redirection, or affine-portal logic,
1825 + * please consult the Qman driver for details. This is the same, only simpler
1826 + * (no fiddly Qman-specific bits.) */
1827 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
1828 +#define PORTAL_IRQ_LOCK(p, irqflags) \
1829 + do { \
1830 + if ((p)->is_shared) \
1831 + raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
1832 + else \
1833 + local_irq_save(irqflags); \
1834 + } while (0)
1835 +#define PORTAL_IRQ_UNLOCK(p, irqflags) \
1836 + do { \
1837 + if ((p)->is_shared) \
1838 + raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
1839 + irqflags); \
1840 + else \
1841 + local_irq_restore(irqflags); \
1842 + } while (0)
1843 +#else
1844 +#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
1845 +#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
1846 +#endif
1847 +
1848 +static cpumask_t affine_mask;
1849 +static DEFINE_SPINLOCK(affine_mask_lock);
1850 +static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
1851 +static inline struct bman_portal *get_raw_affine_portal(void)
1852 +{
1853 + return &get_cpu_var(bman_affine_portal);
1854 +}
1855 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
1856 +static inline struct bman_portal *get_affine_portal(void)
1857 +{
1858 + struct bman_portal *p = get_raw_affine_portal();
1859 + if (p->sharing_redirect)
1860 + return p->sharing_redirect;
1861 + return p;
1862 +}
1863 +#else
1864 +#define get_affine_portal() get_raw_affine_portal()
1865 +#endif
1866 +static inline void put_affine_portal(void)
1867 +{
1868 + put_cpu_var(bman_affine_portal);
1869 +}
1870 +static inline struct bman_portal *get_poll_portal(void)
1871 +{
1872 + return &get_cpu_var(bman_affine_portal);
1873 +}
1874 +#define put_poll_portal()
1875 +
1876 +/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be
1877 + * more than one such object per Bman buffer pool, eg. if different users of the
1878 + * pool are operating via different portals. */
1879 +struct bman_pool {
1880 + struct bman_pool_params params;
1881 + /* Used for hash-table admin when using depletion notifications. */
1882 + struct bman_portal *portal;
1883 + struct bman_pool *next;
1884 + /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
1885 + struct bm_buffer *sp;
1886 + unsigned int sp_fill;
1887 +#ifdef CONFIG_FSL_DPA_CHECKING
1888 + atomic_t in_use;
1889 +#endif
1890 +};
1891 +
1892 +/* (De)Registration of depletion notification callbacks */
1893 +static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
1894 +{
1895 + __maybe_unused unsigned long irqflags;
1896 + pool->portal = portal;
1897 + PORTAL_IRQ_LOCK(portal, irqflags);
1898 + pool->next = portal->cb[pool->params.bpid];
1899 + portal->cb[pool->params.bpid] = pool;
1900 + if (!pool->next)
1901 + /* First object for that bpid on this portal, enable the BSCN
1902 + * mask bit. */
1903 + bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
1904 + PORTAL_IRQ_UNLOCK(portal, irqflags);
1905 +}
1906 +static void depletion_unlink(struct bman_pool *pool)
1907 +{
1908 + struct bman_pool *it, *last = NULL;
1909 + struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
1910 + __maybe_unused unsigned long irqflags;
1911 + PORTAL_IRQ_LOCK(pool->portal, irqflags);
1912 + it = *base; /* <-- gotcha, don't do this prior to the irq_save */
1913 + while (it != pool) {
1914 + last = it;
1915 + it = it->next;
1916 + }
1917 + if (!last)
1918 + *base = pool->next;
1919 + else
1920 + last->next = pool->next;
1921 + if (!last && !pool->next) {
1922 + /* Last object for that bpid on this portal, disable the BSCN
1923 + * mask bit. */
1924 + bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
1925 + /* And "forget" that we last saw this pool as depleted */
1926 + bman_depletion_unset(&pool->portal->pools[1],
1927 + pool->params.bpid);
1928 + }
1929 + PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
1930 +}
1931 +
1932 +/* In the case that the application's core loop calls qman_poll() and
1933 + * bman_poll(), we ought to balance how often we incur the overheads of the
1934 + * slow-path poll. We'll use two decrementer sources. The idle decrementer
1935 + * constant is used when the last slow-poll detected no work to do, and the busy
1936 + * decrementer constant when the last slow-poll had work to do. */
1937 +#define SLOW_POLL_IDLE 1000
1938 +#define SLOW_POLL_BUSY 10
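+/* (In other words, after a slow-poll that found nothing, bman_poll() skips
+ * the slow path for roughly the next 1000 calls; after one that found work,
+ * it retries after only 10.) */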
1939 +static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
1940 +
1941 +/* Portal interrupt handler */
1942 +static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
1943 +{
1944 + struct bman_portal *p = ptr;
1945 + u32 clear = p->irq_sources;
1946 + u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
1947 + clear |= __poll_portal_slow(p, is);
1948 + bm_isr_status_clear(&p->p, clear);
1949 + return IRQ_HANDLED;
1950 +}
1951 +
1952 +#ifdef CONFIG_SUSPEND
1953 +static int _bman_portal_suspend_noirq(struct device *dev)
1954 +{
1955 + struct bman_portal *p = (struct bman_portal *)dev->platform_data;
1956 +#ifdef CONFIG_PM_DEBUG
1957 + struct platform_device *pdev = to_platform_device(dev);
1958 +#endif
1959 + p->save_isdr = bm_isr_disable_read(&p->p);
1960 + bm_isr_disable_write(&p->p, 0xffffffff);
1961 + bm_isr_status_clear(&p->p, 0xffffffff);
1962 +#ifdef CONFIG_PM_DEBUG
1963 + pr_info("Suspend for %s\n", pdev->name);
1964 +#endif
1965 + return 0;
1966 +}
1967 +
1968 +static int _bman_portal_resume_noirq(struct device *dev)
1969 +{
1970 + struct bman_portal *p = (struct bman_portal *)dev->platform_data;
1971 +
1972 + /* restore isdr */
1973 + bm_isr_disable_write(&p->p, p->save_isdr);
1974 + return 0;
1975 +}
1976 +#else
1977 +#define _bman_portal_suspend_noirq NULL
1978 +#define _bman_portal_resume_noirq NULL
1979 +#endif
1980 +
1981 +struct dev_pm_domain bman_portal_device_pm_domain = {
1982 + .ops = {
1983 + USE_PLATFORM_PM_SLEEP_OPS
1984 + .suspend_noirq = _bman_portal_suspend_noirq,
1985 + .resume_noirq = _bman_portal_resume_noirq,
1986 + }
1987 +};
1988 +
1989 +struct bman_portal *bman_create_portal(
1990 + struct bman_portal *portal,
1991 + const struct bm_portal_config *config)
1992 +{
1993 + struct bm_portal *__p;
1994 + const struct bman_depletion *pools = &config->public_cfg.mask;
1995 + int ret;
1996 + u8 bpid = 0;
1997 + char buf[16];
1998 +
1999 + if (!portal) {
2000 + portal = kmalloc(sizeof(*portal), GFP_KERNEL);
2001 + if (!portal)
2002 + return portal;
2003 + portal->alloced = 1;
2004 + } else
2005 + portal->alloced = 0;
2006 +
2007 + __p = &portal->p;
2008 +
2009 + /* prep the low-level portal struct with the mapped addresses from the
2010 + * config, everything that follows depends on it and "config" is more
2011 + * for (de)reference... */
2012 + __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
2013 + __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
2014 + if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
2015 + pr_err("Bman RCR initialisation failed\n");
2016 + goto fail_rcr;
2017 + }
2018 + if (bm_mc_init(__p)) {
2019 + pr_err("Bman MC initialisation failed\n");
2020 + goto fail_mc;
2021 + }
2022 + if (bm_isr_init(__p)) {
2023 + pr_err("Bman ISR initialisation failed\n");
2024 + goto fail_isr;
2025 + }
2026 + portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
2027 + if (!portal->pools)
2028 + goto fail_pools;
2029 + portal->pools[0] = *pools;
2030 + bman_depletion_init(portal->pools + 1);
2031 + while (bpid < bman_pool_max) {
2032 + /* Default to all BPIDs disabled, we enable as required at
2033 + * run-time. */
2034 + bm_isr_bscn_mask(__p, bpid, 0);
2035 + bpid++;
2036 + }
2037 + portal->slowpoll = 0;
2038 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
2039 + portal->rcri_owned = NULL;
2040 +#endif
2041 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
2042 + raw_spin_lock_init(&portal->sharing_lock);
2043 + portal->is_shared = config->public_cfg.is_shared;
2044 + portal->sharing_redirect = NULL;
2045 +#endif
2046 + sprintf(buf, "bportal-%u", config->public_cfg.index);
2047 + portal->pdev = platform_device_alloc(buf, -1);
2048 + if (!portal->pdev)
2049 + goto fail_devalloc;
2050 + portal->pdev->dev.pm_domain = &bman_portal_device_pm_domain;
2051 + portal->pdev->dev.platform_data = portal;
2052 + ret = platform_device_add(portal->pdev);
2053 + if (ret)
2054 + goto fail_devadd;
2055 + memset(&portal->cb, 0, sizeof(portal->cb));
2056 + /* Write-to-clear any stale interrupt status bits */
2057 + bm_isr_disable_write(__p, 0xffffffff);
2058 + portal->irq_sources = 0;
2059 + bm_isr_enable_write(__p, portal->irq_sources);
2060 + bm_isr_status_clear(__p, 0xffffffff);
2061 + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
2062 + if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
2063 + portal)) {
2064 + pr_err("request_irq() failed\n");
2065 + goto fail_irq;
2066 + }
2067 + if ((config->public_cfg.cpu != -1) &&
2068 + irq_can_set_affinity(config->public_cfg.irq) &&
2069 + irq_set_affinity(config->public_cfg.irq,
2070 + cpumask_of(config->public_cfg.cpu))) {
2071 + pr_err("irq_set_affinity() failed %s\n", portal->irqname);
2072 + goto fail_affinity;
2073 + }
2074 +
2075 + /* Need RCR to be empty before continuing */
2076 + ret = bm_rcr_get_fill(__p);
2077 + if (ret) {
2078 + pr_err("Bman RCR unclean\n");
2079 + goto fail_rcr_empty;
2080 + }
2081 + /* Success */
2082 + portal->config = config;
2083 +
2084 + bm_isr_disable_write(__p, 0);
2085 + bm_isr_uninhibit(__p);
2086 + return portal;
2087 +fail_rcr_empty:
2088 +fail_affinity:
2089 + free_irq(config->public_cfg.irq, portal);
2090 +fail_irq:
2091 + platform_device_del(portal->pdev);
2092 +fail_devadd:
2093 + platform_device_put(portal->pdev);
2094 +fail_devalloc:
2095 + kfree(portal->pools);
2096 +fail_pools:
2097 + bm_isr_finish(__p);
2098 +fail_isr:
2099 + bm_mc_finish(__p);
2100 +fail_mc:
2101 + bm_rcr_finish(__p);
2102 +fail_rcr:
2103 + if (portal->alloced)
2104 + kfree(portal);
2105 + return NULL;
2106 +}
2107 +
2108 +struct bman_portal *bman_create_affine_portal(
2109 + const struct bm_portal_config *config)
2110 +{
2111 + struct bman_portal *portal;
2112 +
2113 + portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
2114 + portal = bman_create_portal(portal, config);
2115 + if (portal) {
2116 + spin_lock(&affine_mask_lock);
2117 + cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
2118 + spin_unlock(&affine_mask_lock);
2119 + }
2120 + return portal;
2121 +}
2122 +
2123 +
2124 +struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
2125 + int cpu)
2126 +{
2127 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
2128 + struct bman_portal *p;
2129 + p = &per_cpu(bman_affine_portal, cpu);
2130 + BUG_ON(p->config);
2131 + BUG_ON(p->is_shared);
2132 + BUG_ON(!redirect->config->public_cfg.is_shared);
2133 + p->irq_sources = 0;
2134 + p->sharing_redirect = redirect;
2135 + return p;
2136 +#else
2137 + BUG();
2138 + return NULL;
2139 +#endif
2140 +}
2141 +
2142 +void bman_destroy_portal(struct bman_portal *bm)
2143 +{
2144 + const struct bm_portal_config *pcfg;
2145 + pcfg = bm->config;
2146 + bm_rcr_cce_update(&bm->p);
2147 + bm_rcr_cce_update(&bm->p);
2148 +
2149 + free_irq(pcfg->public_cfg.irq, bm);
2150 +
2151 + kfree(bm->pools);
2152 + bm_isr_finish(&bm->p);
2153 + bm_mc_finish(&bm->p);
2154 + bm_rcr_finish(&bm->p);
2155 + bm->config = NULL;
2156 + if (bm->alloced)
2157 + kfree(bm);
2158 +}
2159 +
2160 +const struct bm_portal_config *bman_destroy_affine_portal(void)
2161 +{
2162 + struct bman_portal *bm = get_raw_affine_portal();
2163 + const struct bm_portal_config *pcfg;
2164 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
2165 + if (bm->sharing_redirect) {
2166 + bm->sharing_redirect = NULL;
2167 + put_affine_portal();
2168 + return NULL;
2169 + }
2170 + bm->is_shared = 0;
2171 +#endif
2172 + pcfg = bm->config;
2173 + bman_destroy_portal(bm);
2174 + spin_lock(&affine_mask_lock);
2175 + cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
2176 + spin_unlock(&affine_mask_lock);
2177 + put_affine_portal();
2178 + return pcfg;
2179 +}
2180 +
2181 +/* When release logic waits on available RCR space, we need a global waitqueue
2182 + * in the case of "affine" use (as the waits wake on different cpus which means
2183 + * different portals - so we can't wait on any per-portal waitqueue). */
2184 +static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
2185 +
2186 +static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
2187 +{
2188 + struct bman_depletion tmp;
2189 + u32 ret = is;
2190 +
2191 + /* There is a gotcha to be aware of. If we do the query before clearing
2192 + * the status register, we may miss state changes that occur between the
2193 + * two. If we write to clear the status register before the query, the
2194 + * cache-enabled query command may overtake the status register write
2195 + * unless we use a heavyweight sync (which we don't want). Instead, we
2196 + * write-to-clear the status register then *read it back* before doing
2197 + * the query, hence the odd while loop with the 'is' accumulation. */
2198 + if (is & BM_PIRQ_BSCN) {
2199 + struct bm_mc_result *mcr;
2200 + __maybe_unused unsigned long irqflags;
2201 + unsigned int i, j;
2202 + u32 __is;
2203 + bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
2204 + while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
2205 + is |= __is;
2206 + bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
2207 + }
2208 + is &= ~BM_PIRQ_BSCN;
2209 + PORTAL_IRQ_LOCK(p, irqflags);
2210 + bm_mc_start(&p->p);
2211 + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
2212 + while (!(mcr = bm_mc_result(&p->p)))
2213 + cpu_relax();
2214 + tmp = mcr->query.ds.state;
2215 + tmp.__state[0] = be32_to_cpu(tmp.__state[0]);
2216 + tmp.__state[1] = be32_to_cpu(tmp.__state[1]);
2217 + PORTAL_IRQ_UNLOCK(p, irqflags);
2218 + for (i = 0; i < 2; i++) {
2219 + int idx = i * 32;
2220 + /* tmp is a mask of currently-depleted pools.
2221 + * pools[0] is mask of those we care about.
2222 + * pools[1] is our previous view (we only want to
2223 + * be told about changes). */
2224 + tmp.__state[i] &= p->pools[0].__state[i];
2225 + if (tmp.__state[i] == p->pools[1].__state[i])
2226 + /* fast-path, nothing to see, move along */
2227 + continue;
2228 + for (j = 0; j <= 31; j++, idx++) {
2229 + struct bman_pool *pool = p->cb[idx];
2230 + int b4 = bman_depletion_get(&p->pools[1], idx);
2231 + int af = bman_depletion_get(&tmp, idx);
2232 + if (b4 == af)
2233 + continue;
2234 + while (pool) {
2235 + pool->params.cb(p, pool,
2236 + pool->params.cb_ctx, af);
2237 + pool = pool->next;
2238 + }
2239 + }
2240 + }
2241 + p->pools[1] = tmp;
2242 + }
2243 +
2244 + if (is & BM_PIRQ_RCRI) {
2245 + __maybe_unused unsigned long irqflags;
2246 + PORTAL_IRQ_LOCK(p, irqflags);
2247 + bm_rcr_cce_update(&p->p);
2248 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
2249 + /* If waiting for sync, we only cancel the interrupt threshold
2250 + * when the ring utilisation hits zero. */
2251 + if (p->rcri_owned) {
2252 + if (!bm_rcr_get_fill(&p->p)) {
2253 + p->rcri_owned = NULL;
2254 + bm_rcr_set_ithresh(&p->p, 0);
2255 + }
2256 + } else
2257 +#endif
2258 + bm_rcr_set_ithresh(&p->p, 0);
2259 + PORTAL_IRQ_UNLOCK(p, irqflags);
2260 + wake_up(&affine_queue);
2261 + bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
2262 + is &= ~BM_PIRQ_RCRI;
2263 + }
2264 +
2265 + /* There should be no status register bits left undefined */
2266 + DPA_ASSERT(!is);
2267 + return ret;
2268 +}
2269 +
2270 +const struct bman_portal_config *bman_get_portal_config(void)
2271 +{
2272 + struct bman_portal *p = get_affine_portal();
2273 + const struct bman_portal_config *ret = &p->config->public_cfg;
2274 + put_affine_portal();
2275 + return ret;
2276 +}
2277 +EXPORT_SYMBOL(bman_get_portal_config);
2278 +
2279 +u32 bman_irqsource_get(void)
2280 +{
2281 + struct bman_portal *p = get_raw_affine_portal();
2282 + u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
2283 + put_affine_portal();
2284 + return ret;
2285 +}
2286 +EXPORT_SYMBOL(bman_irqsource_get);
2287 +
2288 +int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
2289 +{
2290 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
2291 + if (p->sharing_redirect)
2292 + return -EINVAL;
2293 + else
2294 +#endif
2295 + {
2296 + __maybe_unused unsigned long irqflags;
2297 + PORTAL_IRQ_LOCK(p, irqflags);
2298 + set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
2299 + bm_isr_enable_write(&p->p, p->irq_sources);
2300 + PORTAL_IRQ_UNLOCK(p, irqflags);
2301 + }
2302 + return 0;
2303 +}
2304 +EXPORT_SYMBOL(bman_p_irqsource_add);
2305 +
2306 +int bman_irqsource_add(__maybe_unused u32 bits)
2307 +{
2308 + struct bman_portal *p = get_raw_affine_portal();
2309 + int ret = 0;
2310 + ret = bman_p_irqsource_add(p, bits);
2311 + put_affine_portal();
2312 + return ret;
2313 +}
2314 +EXPORT_SYMBOL(bman_irqsource_add);
2315 +
2316 +int bman_irqsource_remove(u32 bits)
2317 +{
2318 + struct bman_portal *p = get_raw_affine_portal();
2319 + __maybe_unused unsigned long irqflags;
2320 + u32 ier;
2321 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
2322 + if (p->sharing_redirect) {
2323 + put_affine_portal();
2324 + return -EINVAL;
2325 + }
2326 +#endif
2327 + /* Our interrupt handler only processes+clears status register bits that
2328 + * are in p->irq_sources. As we're trimming that mask, if one of them
2329 + * were to assert in the status register just before we remove it from
2330 + * the enable register, there would be an interrupt-storm when we
2331 + * release the IRQ lock. So we wait for the enable register update to
2332 + * take effect in h/w (by reading it back) and then clear all other bits
2333 + * in the status register. Ie. we clear them from ISR once it's certain
2334 + * IER won't allow them to reassert. */
2335 + PORTAL_IRQ_LOCK(p, irqflags);
2336 + bits &= BM_PIRQ_VISIBLE;
2337 + clear_bits(bits, &p->irq_sources);
2338 + bm_isr_enable_write(&p->p, p->irq_sources);
2339 + ier = bm_isr_enable_read(&p->p);
2340 + /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
2341 + * data-dependency, ie. to protect against re-ordering. */
2342 + bm_isr_status_clear(&p->p, ~ier);
2343 + PORTAL_IRQ_UNLOCK(p, irqflags);
2344 + put_affine_portal();
2345 + return 0;
2346 +}
2347 +EXPORT_SYMBOL(bman_irqsource_remove);
2348 +
2349 +const cpumask_t *bman_affine_cpus(void)
2350 +{
2351 + return &affine_mask;
2352 +}
2353 +EXPORT_SYMBOL(bman_affine_cpus);
2354 +
2355 +u32 bman_poll_slow(void)
2356 +{
2357 + struct bman_portal *p = get_poll_portal();
2358 + u32 ret;
2359 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
2360 + if (unlikely(p->sharing_redirect))
2361 + ret = (u32)-1;
2362 + else
2363 +#endif
2364 + {
2365 + u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
2366 + ret = __poll_portal_slow(p, is);
2367 + bm_isr_status_clear(&p->p, ret);
2368 + }
2369 + put_poll_portal();
2370 + return ret;
2371 +}
2372 +EXPORT_SYMBOL(bman_poll_slow);
2373 +
2374 +/* Legacy wrapper */
2375 +void bman_poll(void)
2376 +{
2377 + struct bman_portal *p = get_poll_portal();
2378 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
2379 + if (unlikely(p->sharing_redirect))
2380 + goto done;
2381 +#endif
2382 + if (!(p->slowpoll--)) {
2383 + u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
2384 + u32 active = __poll_portal_slow(p, is);
2385 + if (active)
2386 + p->slowpoll = SLOW_POLL_BUSY;
2387 + else
2388 + p->slowpoll = SLOW_POLL_IDLE;
2389 + }
2390 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
2391 +done:
2392 +#endif
2393 + put_poll_portal();
2394 +}
2395 +EXPORT_SYMBOL(bman_poll);
2396 +
2397 +static const u32 zero_thresholds[4] = {0, 0, 0, 0};
2398 +
2399 +struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
2400 +{
2401 + struct bman_pool *pool = NULL;
2402 + u32 bpid;
2403 +
2404 + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
2405 + int ret = bman_alloc_bpid(&bpid);
2406 + if (ret)
2407 + return NULL;
2408 + } else {
2409 + if (params->bpid >= bman_pool_max)
2410 + return NULL;
2411 + bpid = params->bpid;
2412 + }
2413 +#ifdef CONFIG_FSL_BMAN_CONFIG
2414 + if (params->flags & BMAN_POOL_FLAG_THRESH) {
2415 + int ret = bm_pool_set(bpid, params->thresholds);
2416 + if (ret)
2417 + goto err;
2418 + }
2419 +#else
2420 + if (params->flags & BMAN_POOL_FLAG_THRESH)
2421 + goto err;
2422 +#endif
2423 + pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2424 + if (!pool)
2425 + goto err;
2426 + pool->sp = NULL;
2427 + pool->sp_fill = 0;
2428 + pool->params = *params;
2429 +#ifdef CONFIG_FSL_DPA_CHECKING
2430 + atomic_set(&pool->in_use, 1);
2431 +#endif
2432 + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
2433 + pool->params.bpid = bpid;
2434 + if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
2435 + pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
2436 + GFP_KERNEL);
2437 + if (!pool->sp)
2438 + goto err;
2439 + }
2440 + if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
2441 + struct bman_portal *p = get_affine_portal();
2442 + if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
2443 + pr_err("Depletion events disabled for bpid %d\n", bpid);
+			put_affine_portal();
2444 +			goto err;
2445 + }
2446 + depletion_link(p, pool);
2447 + put_affine_portal();
2448 + }
2449 + return pool;
2450 +err:
2451 +#ifdef CONFIG_FSL_BMAN_CONFIG
2452 + if (params->flags & BMAN_POOL_FLAG_THRESH)
2453 + bm_pool_set(bpid, zero_thresholds);
2454 +#endif
2455 + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
2456 + bman_release_bpid(bpid);
2457 + if (pool) {
2458 + kfree(pool->sp);
2459 + kfree(pool);
2460 + }
2461 + return NULL;
2462 +}
2463 +EXPORT_SYMBOL(bman_new_pool);
2464 +
2465 +void bman_free_pool(struct bman_pool *pool)
2466 +{
2467 +#ifdef CONFIG_FSL_BMAN_CONFIG
2468 + if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
2469 + bm_pool_set(pool->params.bpid, zero_thresholds);
2470 +#endif
2471 + if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
2472 + depletion_unlink(pool);
2473 + if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
2474 + if (pool->sp_fill)
2475 + pr_err("Stockpile not flushed, has %u in bpid %u.\n",
2476 + pool->sp_fill, pool->params.bpid);
2477 + kfree(pool->sp);
2478 + pool->sp = NULL;
2479 + pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
2480 + }
2481 + if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
2482 + bman_release_bpid(pool->params.bpid);
2483 + kfree(pool);
2484 +}
2485 +EXPORT_SYMBOL(bman_free_pool);
2486 +
2487 +const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
2488 +{
2489 + return &pool->params;
2490 +}
2491 +EXPORT_SYMBOL(bman_get_params);
2492 +
2493 +static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
2494 +{
2495 + if (avail)
2496 + bm_rcr_cce_prefetch(&p->p);
2497 + else
2498 + bm_rcr_cce_update(&p->p);
2499 +}
2500 +
2501 +int bman_rcr_is_empty(void)
2502 +{
2503 + __maybe_unused unsigned long irqflags;
2504 + struct bman_portal *p = get_affine_portal();
2505 + u8 avail;
2506 +
2507 + PORTAL_IRQ_LOCK(p, irqflags);
2508 + update_rcr_ci(p, 0);
2509 + avail = bm_rcr_get_fill(&p->p);
2510 + PORTAL_IRQ_UNLOCK(p, irqflags);
2511 + put_affine_portal();
2512 + return avail == 0;
2513 +}
2514 +EXPORT_SYMBOL(bman_rcr_is_empty);
2515 +
2516 +static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
2517 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
2518 + __maybe_unused struct bman_pool *pool,
2519 +#endif
2520 + __maybe_unused unsigned long *irqflags,
2521 + __maybe_unused u32 flags)
2522 +{
2523 + struct bm_rcr_entry *r;
2524 + u8 avail;
2525 +
2526 + *p = get_affine_portal();
2527 + PORTAL_IRQ_LOCK(*p, (*irqflags));
2528 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
2529 + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
2530 + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
2531 + if ((*p)->rcri_owned) {
2532 + PORTAL_IRQ_UNLOCK(*p, (*irqflags));
2533 + put_affine_portal();
2534 + return NULL;
2535 + }
2536 + (*p)->rcri_owned = pool;
2537 + }
2538 +#endif
2539 + avail = bm_rcr_get_avail(&(*p)->p);
2540 + if (avail < 2)
2541 + update_rcr_ci(*p, avail);
2542 + r = bm_rcr_start(&(*p)->p);
2543 + if (unlikely(!r)) {
2544 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
2545 + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
2546 + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
2547 + (*p)->rcri_owned = NULL;
2548 +#endif
2549 + PORTAL_IRQ_UNLOCK(*p, (*irqflags));
2550 + put_affine_portal();
2551 + }
2552 + return r;
2553 +}
2554 +
2555 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
2556 +static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
2557 + struct bman_pool *pool,
2558 + __maybe_unused unsigned long *irqflags,
2559 + u32 flags)
2560 +{
2561 + struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
2562 + if (!rcr)
2563 + bm_rcr_set_ithresh(&(*p)->p, 1);
2564 + return rcr;
2565 +}
2566 +
2567 +static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
2568 + struct bman_pool *pool,
2569 + __maybe_unused unsigned long *irqflags,
2570 + u32 flags)
2571 +{
2572 + struct bm_rcr_entry *rcr;
2573 +#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC
2574 + pool = NULL;
2575 +#endif
2576 + if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
2577 + /* NB: return NULL if signal occurs before completion. Signal
2578 + * can occur during return. Caller must check for signal */
2579 + wait_event_interruptible(affine_queue,
2580 + (rcr = __wait_rel_start(p, pool, irqflags, flags)));
2581 + else
2582 + wait_event(affine_queue,
2583 + (rcr = __wait_rel_start(p, pool, irqflags, flags)));
2584 + return rcr;
2585 +}
2586 +#endif
2587 +
2588 +static inline int __bman_release(struct bman_pool *pool,
2589 + const struct bm_buffer *bufs, u8 num, u32 flags)
2590 +{
2591 + struct bman_portal *p;
2592 + struct bm_rcr_entry *r;
2593 + __maybe_unused unsigned long irqflags;
2594 + u32 i = num - 1;
2595 +
2596 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
2597 + if (flags & BMAN_RELEASE_FLAG_WAIT)
2598 + r = wait_rel_start(&p, pool, &irqflags, flags);
2599 + else
2600 + r = try_rel_start(&p, pool, &irqflags, flags);
2601 +#else
2602 + r = try_rel_start(&p, &irqflags, flags);
2603 +#endif
2604 + if (!r)
2605 + return -EBUSY;
2606 +	/* We can copy all but the first entry directly, as copying the first
2607 +	 * entry whole can trigger badness with the valid-bit. Use the overlay
+	 * to mask the verb byte. */
2608 + r->bufs[0].opaque =
2609 + ((cpu_to_be64((bufs[0].opaque |
2610 + ((u64)pool->params.bpid<<48))
2611 + & 0x00ffffffffffffff)));
2612 + if (i) {
2613 + for (i = 1; i < num; i++)
2614 + r->bufs[i].opaque =
2615 + cpu_to_be64(bufs[i].opaque);
2616 + }
2617 +
2618 + bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
2619 + (num & BM_RCR_VERB_BUFCOUNT_MASK));
2620 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
2621 + /* if we wish to sync we need to set the threshold after h/w sees the
2622 + * new ring entry. As we're mixing cache-enabled and cache-inhibited
2623 + * accesses, this requires a heavy-weight sync. */
2624 + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
2625 + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
2626 + hwsync();
2627 + bm_rcr_set_ithresh(&p->p, 1);
2628 + }
2629 +#endif
2630 + PORTAL_IRQ_UNLOCK(p, irqflags);
2631 + put_affine_portal();
2632 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
2633 + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
2634 + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
2635 + if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
2636 + /* NB: return success even if signal occurs before
2637 + * condition is true. pvb_commit guarantees success */
2638 + wait_event_interruptible(affine_queue,
2639 + (p->rcri_owned != pool));
2640 + else
2641 + wait_event(affine_queue, (p->rcri_owned != pool));
2642 + }
2643 +#endif
2644 + return 0;
2645 +}
2646 +
2647 +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
2648 + u32 flags)
2649 +{
2650 + int ret;
2651 +#ifdef CONFIG_FSL_DPA_CHECKING
2652 + if (!num || (num > 8))
2653 + return -EINVAL;
2654 + if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
2655 + return -EINVAL;
2656 +#endif
2657 + /* Without stockpile, this API is a pass-through to the h/w operation */
2658 + if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
2659 + return __bman_release(pool, bufs, num, flags);
2660 +#ifdef CONFIG_FSL_DPA_CHECKING
2661 + if (!atomic_dec_and_test(&pool->in_use)) {
2662 +		pr_crit("Parallel attempts to enter bman_release() detected.");
2663 + panic("only one instance of bman_released/acquired allowed");
2664 + }
2665 +#endif
2666 + /* Two movements of buffers are possible, and can occur in either order.
2667 + * A: moving buffers from the caller to the stockpile.
2668 + * B: moving buffers from the stockpile to hardware.
2669 + * Order 1: if there is already enough space in the stockpile for A
2670 + * then we want to do A first, and only do B if we trigger the
2671 + * stockpile-high threshold.
2672 + * Order 2: if there is not enough space in the stockpile for A, then
2673 + * we want to do B first, then do A if B had succeeded. However in this
2674 + * case B is dependent on how many buffers the user needs to release,
2675 + * not the stockpile-high threshold.
2676 + * Due to the different handling of B between the two cases, putting A
2677 + * and B in a while() loop would require quite obscure logic, so handle
2678 + * the different sequences explicitly. */
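+	/* Illustrative example: if the stockpile is too full to absorb 'num'
+	 * more buffers, the "Order 2" branch below first pushes chunks of 8
+	 * from the top of the stockpile to hardware until the caller's buffers
+	 * fit, then copies them in; otherwise the "Order 1" branch copies
+	 * first and only drains back down below BMAN_STOCKPILE_HIGH. */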
2679 + if ((pool->sp_fill + num) <= BMAN_STOCKPILE_SZ) {
2680 + /* Order 1: do A */
2681 + copy_words(pool->sp + pool->sp_fill, bufs,
2682 + sizeof(struct bm_buffer) * num);
2683 + pool->sp_fill += num;
2684 + /* do B relative to STOCKPILE_HIGH */
2685 + while (pool->sp_fill >= BMAN_STOCKPILE_HIGH) {
2686 + ret = __bman_release(pool,
2687 + pool->sp + (pool->sp_fill - 8), 8,
2688 + flags);
2689 + if (ret >= 0)
2690 + pool->sp_fill -= 8;
2691 + }
2692 + } else {
2693 + /* Order 2: do B relative to 'num' */
2694 + do {
2695 + ret = __bman_release(pool,
2696 + pool->sp + (pool->sp_fill - 8), 8,
2697 + flags);
2698 + if (ret < 0)
2699 + /* failure */
2700 + goto release_done;
2701 + pool->sp_fill -= 8;
2702 + } while ((pool->sp_fill + num) > BMAN_STOCKPILE_SZ);
2703 + /* do A */
2704 + copy_words(pool->sp + pool->sp_fill, bufs,
2705 + sizeof(struct bm_buffer) * num);
2706 + pool->sp_fill += num;
2707 + }
2708 + /* success */
2709 + ret = 0;
2710 +release_done:
2711 +#ifdef CONFIG_FSL_DPA_CHECKING
2712 + atomic_inc(&pool->in_use);
2713 +#endif
2714 + return ret;
2715 +}
2716 +EXPORT_SYMBOL(bman_release);
2717 +
2718 +static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
2719 + u8 num)
2720 +{
2721 + struct bman_portal *p = get_affine_portal();
2722 + struct bm_mc_command *mcc;
2723 + struct bm_mc_result *mcr;
2724 + __maybe_unused unsigned long irqflags;
2725 + int ret, i;
2726 +
2727 + PORTAL_IRQ_LOCK(p, irqflags);
2728 + mcc = bm_mc_start(&p->p);
2729 + mcc->acquire.bpid = pool->params.bpid;
2730 + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
2731 + (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
2732 + while (!(mcr = bm_mc_result(&p->p)))
2733 + cpu_relax();
2734 + ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
2735 + if (bufs) {
2736 + for (i = 0; i < num; i++)
2737 + bufs[i].opaque =
2738 + be64_to_cpu(mcr->acquire.bufs[i].opaque);
2739 + }
2740 + PORTAL_IRQ_UNLOCK(p, irqflags);
2741 + put_affine_portal();
2742 + if (ret != num)
2743 + ret = -ENOMEM;
2744 + return ret;
2745 +}
2746 +
2747 +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
2748 + u32 flags)
2749 +{
2750 + int ret;
2751 +#ifdef CONFIG_FSL_DPA_CHECKING
2752 + if (!num || (num > 8))
2753 + return -EINVAL;
2754 + if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
2755 + return -EINVAL;
2756 +#endif
2757 + /* Without stockpile, this API is a pass-through to the h/w operation */
2758 + if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
2759 + return __bman_acquire(pool, bufs, num);
2760 +#ifdef CONFIG_FSL_DPA_CHECKING
2761 + if (!atomic_dec_and_test(&pool->in_use)) {
2762 + pr_crit("Parallel attempts to enter bman_acquire() detected.");
2763 + panic("only one instance of bman_released/acquired allowed");
2764 + }
2765 +#endif
2766 + /* Two movements of buffers are possible, and can occur in either order.
2767 + * A: moving buffers from stockpile to the caller.
2768 + * B: moving buffers from hardware to the stockpile.
2769 + * Order 1: if there are already enough buffers in the stockpile for A
2770 + * then we want to do A first, and only do B if we trigger the
2771 + * stockpile-low threshold.
2772 + * Order 2: if there are not enough buffers in the stockpile for A,
2773 + * then we want to do B first, then do A if B had succeeded. However in
2774 + * this case B is dependent on how many buffers the user needs, not the
2775 + * stockpile-low threshold.
2776 + * Due to the different handling of B between the two cases, putting A
2777 + * and B in a while() loop would require quite obscure logic, so handle
2778 + * the different sequences explicitly. */
2779 + if (num <= pool->sp_fill) {
2780 + /* Order 1: do A */
2781 + copy_words(bufs, pool->sp + (pool->sp_fill - num),
2782 + sizeof(struct bm_buffer) * num);
2783 + pool->sp_fill -= num;
2784 + /* do B relative to STOCKPILE_LOW */
2785 + while (pool->sp_fill <= BMAN_STOCKPILE_LOW) {
2786 + ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
2787 + if (ret < 0)
2788 + ret = __bman_acquire(pool,
2789 + pool->sp + pool->sp_fill, 1);
2790 + if (ret < 0)
2791 + break;
2792 + pool->sp_fill += ret;
2793 + }
2794 + } else {
2795 + /* Order 2: do B relative to 'num' */
2796 + do {
2797 + ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
2798 + if (ret < 0)
2799 + ret = __bman_acquire(pool,
2800 + pool->sp + pool->sp_fill, 1);
2801 + if (ret < 0)
2802 + /* failure */
2803 + goto acquire_done;
2804 + pool->sp_fill += ret;
2805 + } while (pool->sp_fill < num);
2806 + /* do A */
2807 + copy_words(bufs, pool->sp + (pool->sp_fill - num),
2808 + sizeof(struct bm_buffer) * num);
2809 + pool->sp_fill -= num;
2810 + }
2811 + /* success */
2812 + ret = num;
2813 +acquire_done:
2814 +#ifdef CONFIG_FSL_DPA_CHECKING
2815 + atomic_inc(&pool->in_use);
2816 +#endif
2817 + return ret;
2818 +}
2819 +EXPORT_SYMBOL(bman_acquire);
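+
+/* A minimal usage sketch of the pool API above (illustrative only, not part
+ * of the original driver): allocate a dynamic-BPID pool, seed it with a
+ * buffer, pull it back and tear the pool down. Error handling is omitted.
+ *
+ *	struct bman_pool_params params = {
+ *		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
+ *	};
+ *	struct bman_pool *pool = bman_new_pool(&params);
+ *	struct bm_buffer buf = { .opaque = buffer_phys_addr };
+ *	bman_release(pool, &buf, 1, 0);
+ *	bman_acquire(pool, &buf, 1, 0);
+ *	bman_free_pool(pool);
+ *
+ * (buffer_phys_addr is a placeholder for however the caller encodes its
+ * buffer address into the bm_buffer.) */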
2820 +
2821 +int bman_flush_stockpile(struct bman_pool *pool, u32 flags)
2822 +{
2823 + u8 num;
2824 + int ret;
2825 +
2826 + while (pool->sp_fill) {
2827 + num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill);
2828 + ret = __bman_release(pool, pool->sp + (pool->sp_fill - num),
2829 + num, flags);
2830 + if (ret)
2831 + return ret;
2832 + pool->sp_fill -= num;
2833 + }
2834 + return 0;
2835 +}
2836 +EXPORT_SYMBOL(bman_flush_stockpile);
2837 +
2838 +int bman_query_pools(struct bm_pool_state *state)
2839 +{
2840 + struct bman_portal *p = get_affine_portal();
2841 + struct bm_mc_result *mcr;
2842 + __maybe_unused unsigned long irqflags;
2843 +
2844 + PORTAL_IRQ_LOCK(p, irqflags);
2845 + bm_mc_start(&p->p);
2846 + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
2847 + while (!(mcr = bm_mc_result(&p->p)))
2848 + cpu_relax();
2849 + DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY);
2850 + *state = mcr->query;
2851 + state->as.state.__state[0] = be32_to_cpu(state->as.state.__state[0]);
2852 + state->as.state.__state[1] = be32_to_cpu(state->as.state.__state[1]);
2853 + state->ds.state.__state[0] = be32_to_cpu(state->ds.state.__state[0]);
2854 + state->ds.state.__state[1] = be32_to_cpu(state->ds.state.__state[1]);
2855 + PORTAL_IRQ_UNLOCK(p, irqflags);
2856 + put_affine_portal();
2857 + return 0;
2858 +}
2859 +EXPORT_SYMBOL(bman_query_pools);
2860 +
2861 +#ifdef CONFIG_FSL_BMAN_CONFIG
2862 +u32 bman_query_free_buffers(struct bman_pool *pool)
2863 +{
2864 + return bm_pool_free_buffers(pool->params.bpid);
2865 +}
2866 +EXPORT_SYMBOL(bman_query_free_buffers);
2867 +
2868 +int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
2869 +{
2870 + u32 bpid;
2871 +
2872 + bpid = bman_get_params(pool)->bpid;
2873 +
2874 + return bm_pool_set(bpid, thresholds);
2875 +}
2876 +EXPORT_SYMBOL(bman_update_pool_thresholds);
2877 +#endif
2878 +
2879 +int bman_shutdown_pool(u32 bpid)
2880 +{
2881 + struct bman_portal *p = get_affine_portal();
2882 + __maybe_unused unsigned long irqflags;
2883 + int ret;
2884 +
2885 + PORTAL_IRQ_LOCK(p, irqflags);
2886 + ret = bm_shutdown_pool(&p->p, bpid);
2887 + PORTAL_IRQ_UNLOCK(p, irqflags);
2888 + put_affine_portal();
2889 + return ret;
2890 +}
2891 +EXPORT_SYMBOL(bman_shutdown_pool);
2892 +
2893 +const struct bm_portal_config *bman_get_bm_portal_config(
2894 + struct bman_portal *portal)
2895 +{
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
2896 +	return portal->sharing_redirect ? NULL : portal->config;
+#else
+	return portal->config;
+#endif
2897 +}
2898 --- /dev/null
2899 +++ b/drivers/staging/fsl_qbman/bman_low.h
2900 @@ -0,0 +1,565 @@
2901 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
2902 + *
2903 + * Redistribution and use in source and binary forms, with or without
2904 + * modification, are permitted provided that the following conditions are met:
2905 + * * Redistributions of source code must retain the above copyright
2906 + * notice, this list of conditions and the following disclaimer.
2907 + * * Redistributions in binary form must reproduce the above copyright
2908 + * notice, this list of conditions and the following disclaimer in the
2909 + * documentation and/or other materials provided with the distribution.
2910 + * * Neither the name of Freescale Semiconductor nor the
2911 + * names of its contributors may be used to endorse or promote products
2912 + * derived from this software without specific prior written permission.
2913 + *
2914 + *
2915 + * ALTERNATIVELY, this software may be distributed under the terms of the
2916 + * GNU General Public License ("GPL") as published by the Free Software
2917 + * Foundation, either version 2 of that License or (at your option) any
2918 + * later version.
2919 + *
2920 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
2921 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2922 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2923 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
2924 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2925 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2926 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2927 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2928 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2929 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2930 + */
2931 +
2932 +#include "bman_private.h"
2933 +
2934 +/***************************/
2935 +/* Portal register assists */
2936 +/***************************/
2937 +
2938 +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
2939 +
2940 +/* Cache-inhibited register offsets */
2941 +#define BM_REG_RCR_PI_CINH 0x0000
2942 +#define BM_REG_RCR_CI_CINH 0x0004
2943 +#define BM_REG_RCR_ITR 0x0008
2944 +#define BM_REG_CFG 0x0100
2945 +#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
2946 +#define BM_REG_ISR 0x0e00
2947 +#define BM_REG_IIR 0x0e0c
2948 +
2949 +/* Cache-enabled register offsets */
2950 +#define BM_CL_CR 0x0000
2951 +#define BM_CL_RR0 0x0100
2952 +#define BM_CL_RR1 0x0140
2953 +#define BM_CL_RCR 0x1000
2954 +#define BM_CL_RCR_PI_CENA 0x3000
2955 +#define BM_CL_RCR_CI_CENA 0x3100
2956 +
2957 +#endif
2958 +
2959 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
2960 +
2961 +/* Cache-inhibited register offsets */
2962 +#define BM_REG_RCR_PI_CINH 0x3000
2963 +#define BM_REG_RCR_CI_CINH 0x3100
2964 +#define BM_REG_RCR_ITR 0x3200
2965 +#define BM_REG_CFG 0x3300
2966 +#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
2967 +#define BM_REG_ISR 0x3e00
2968 +#define BM_REG_IIR 0x3ec0
2969 +
2970 +/* Cache-enabled register offsets */
2971 +#define BM_CL_CR 0x0000
2972 +#define BM_CL_RR0 0x0100
2973 +#define BM_CL_RR1 0x0140
2974 +#define BM_CL_RCR 0x1000
2975 +#define BM_CL_RCR_PI_CENA 0x3000
2976 +#define BM_CL_RCR_CI_CENA 0x3100
2977 +
2978 +#endif
2979 +
2980 +/* BTW, the drivers (and h/w programming model) already obtain the required
2981 + * synchronisation for portal accesses via lwsync(), hwsync(), and
2982 + * data-dependencies. Use of barrier()s or other order-preserving primitives
2983 + * simply degrade performance. Hence the use of the __raw_*() interfaces, which
2984 + * simply ensure that the compiler treats the portal registers as volatile (ie.
2985 + * non-coherent). */
2986 +
2987 +/* Cache-inhibited register access. */
2988 +#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ci + (o)))
2989 +#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
2990 + (bm)->addr_ci + (o));
2991 +#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
2992 +#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
2993 +
2994 +/* Cache-enabled (index) register access */
2995 +#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
2996 +#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
2997 +#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ce + (o)))
2998 +#define __bm_cl_out(bm, o, val) \
2999 + do { \
3000 + u32 *__tmpclout = (bm)->addr_ce + (o); \
3001 + __raw_writel(cpu_to_be32(val), __tmpclout); \
3002 + dcbf(__tmpclout); \
3003 + } while (0)
3004 +#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
3005 +#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
3006 +#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
3007 +#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
3008 +#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
3009 +#define bm_cl_invalidate(reg)\
3010 + __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
3011 +
3012 +/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
3013 + * analysis, look at using the "extra" bit in the ring index registers to avoid
3014 + * cyclic issues. */
3015 +static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
3016 +{
3017 + /* 'first' is included, 'last' is excluded */
3018 + if (first <= last)
3019 + return last - first;
3020 + return ringsize + last - first;
3021 +}
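+/* (e.g. with ringsize 8, first==6 and last==2 gives 4: the entries 6, 7, 0
+ * and 1, with 'last' itself excluded.) */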
3022 +
3023 +/* Portal modes.
3024 + * Enum types;
3025 + * pmode == production mode
3026 + * cmode == consumption mode,
3027 + * Enum values use 3 letter codes. First letter matches the portal mode,
3028 + * remaining two letters indicate;
3029 + * ci == cache-inhibited portal register
3030 + * ce == cache-enabled portal register
3031 + * vb == in-band valid-bit (cache-enabled)
3032 + */
3033 +enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
3034 + bm_rcr_pci = 0, /* PI index, cache-inhibited */
3035 + bm_rcr_pce = 1, /* PI index, cache-enabled */
3036 + bm_rcr_pvb = 2 /* valid-bit */
3037 +};
3038 +enum bm_rcr_cmode { /* s/w-only */
3039 + bm_rcr_cci, /* CI index, cache-inhibited */
3040 + bm_rcr_cce /* CI index, cache-enabled */
3041 +};
3042 +
3043 +
3044 +/* ------------------------- */
3045 +/* --- Portal structures --- */
3046 +
3047 +#define BM_RCR_SIZE 8
3048 +
3049 +struct bm_rcr {
3050 + struct bm_rcr_entry *ring, *cursor;
3051 + u8 ci, available, ithresh, vbit;
3052 +#ifdef CONFIG_FSL_DPA_CHECKING
3053 + u32 busy;
3054 + enum bm_rcr_pmode pmode;
3055 + enum bm_rcr_cmode cmode;
3056 +#endif
3057 +};
3058 +
3059 +struct bm_mc {
3060 + struct bm_mc_command *cr;
3061 + struct bm_mc_result *rr;
3062 + u8 rridx, vbit;
3063 +#ifdef CONFIG_FSL_DPA_CHECKING
3064 + enum {
3065 + /* Can only be _mc_start()ed */
3066 + mc_idle,
3067 + /* Can only be _mc_commit()ed or _mc_abort()ed */
3068 + mc_user,
3069 + /* Can only be _mc_retry()ed */
3070 + mc_hw
3071 + } state;
3072 +#endif
3073 +};
3074 +
3075 +struct bm_addr {
3076 + void __iomem *addr_ce; /* cache-enabled */
3077 + void __iomem *addr_ci; /* cache-inhibited */
3078 +};
3079 +
3080 +struct bm_portal {
3081 + struct bm_addr addr;
3082 + struct bm_rcr rcr;
3083 + struct bm_mc mc;
3084 + struct bm_portal_config config;
3085 +} ____cacheline_aligned;
3086 +
3087 +
3088 +/* --------------- */
3089 +/* --- RCR API --- */
3090 +
3091 +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
3092 +#define RCR_CARRYCLEAR(p) \
3093 + (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
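+/* (The ring is BM_RCR_SIZE (8) entries of 64 bytes, i.e. 512 bytes, so the
+ * "carry bit" being cleared here is bit 9 (0x200) of the cursor address.) */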
3094 +
3095 +/* Bit-wise logic to convert a ring pointer to a ring index */
3096 +static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
3097 +{
3098 + return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
3099 +}
3100 +
3101 +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
3102 +static inline void RCR_INC(struct bm_rcr *rcr)
3103 +{
3104 + /* NB: this is odd-looking, but experiments show that it generates
3105 + * fast code with essentially no branching overheads. We increment to
3106 + * the next RCR pointer and handle overflow and 'vbit'. */
3107 + struct bm_rcr_entry *partial = rcr->cursor + 1;
3108 + rcr->cursor = RCR_CARRYCLEAR(partial);
3109 + if (partial != rcr->cursor)
3110 + rcr->vbit ^= BM_RCR_VERB_VBIT;
3111 +}
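+/* (e.g. when the cursor steps past the last of the BM_RCR_SIZE entries,
+ * RCR_CARRYCLEAR() wraps it back to entry 0; 'partial' then differs from
+ * 'cursor' and the valid-bit is toggled for the new lap of the ring.) */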
3112 +
3113 +static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
3114 + __maybe_unused enum bm_rcr_cmode cmode)
3115 +{
3116 + /* This use of 'register', as well as all other occurrences, is because
3117 + * it has been observed to generate much faster code with gcc than is
3118 + * otherwise the case. */
3119 + register struct bm_rcr *rcr = &portal->rcr;
3120 + u32 cfg;
3121 + u8 pi;
3122 +
3123 + rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
3124 + rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
3125 +
3126 + pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
3127 + rcr->cursor = rcr->ring + pi;
3128 + rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
3129 + rcr->available = BM_RCR_SIZE - 1
3130 + - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
3131 + rcr->ithresh = bm_in(RCR_ITR);
3132 +#ifdef CONFIG_FSL_DPA_CHECKING
3133 + rcr->busy = 0;
3134 + rcr->pmode = pmode;
3135 + rcr->cmode = cmode;
3136 +#endif
3137 + cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
3138 + bm_out(CFG, cfg);
3139 + return 0;
3140 +}
3141 +
3142 +static inline void bm_rcr_finish(struct bm_portal *portal)
3143 +{
3144 + register struct bm_rcr *rcr = &portal->rcr;
3145 + u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
3146 + u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
3147 + DPA_ASSERT(!rcr->busy);
3148 + if (pi != RCR_PTR2IDX(rcr->cursor))
3149 +		pr_crit("losing uncommitted RCR entries\n");
3150 + if (ci != rcr->ci)
3151 + pr_crit("missing existing RCR completions\n");
3152 + if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
3153 + pr_crit("RCR destroyed unquiesced\n");
3154 +}
3155 +
3156 +static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
3157 +{
3158 + register struct bm_rcr *rcr = &portal->rcr;
3159 + DPA_ASSERT(!rcr->busy);
3160 + if (!rcr->available)
3161 + return NULL;
3162 +#ifdef CONFIG_FSL_DPA_CHECKING
3163 + rcr->busy = 1;
3164 +#endif
3165 +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
3166 + dcbz_64(rcr->cursor);
3167 +#endif
3168 + return rcr->cursor;
3169 +}
3170 +
3171 +static inline void bm_rcr_abort(struct bm_portal *portal)
3172 +{
3173 + __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
3174 + DPA_ASSERT(rcr->busy);
3175 +#ifdef CONFIG_FSL_DPA_CHECKING
3176 + rcr->busy = 0;
3177 +#endif
3178 +}
3179 +
3180 +static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
3181 + struct bm_portal *portal, u8 myverb)
3182 +{
3183 + register struct bm_rcr *rcr = &portal->rcr;
3184 + DPA_ASSERT(rcr->busy);
3185 + DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
3186 + if (rcr->available == 1)
3187 + return NULL;
3188 + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
3189 + dcbf_64(rcr->cursor);
3190 + RCR_INC(rcr);
3191 + rcr->available--;
3192 +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
3193 + dcbz_64(rcr->cursor);
3194 +#endif
3195 + return rcr->cursor;
3196 +}
3197 +
3198 +static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
3199 +{
3200 + register struct bm_rcr *rcr = &portal->rcr;
3201 + DPA_ASSERT(rcr->busy);
3202 + DPA_ASSERT(rcr->pmode == bm_rcr_pci);
3203 + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
3204 + RCR_INC(rcr);
3205 + rcr->available--;
3206 + hwsync();
3207 + bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
3208 +#ifdef CONFIG_FSL_DPA_CHECKING
3209 + rcr->busy = 0;
3210 +#endif
3211 +}
3212 +
3213 +static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
3214 +{
3215 + __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
3216 + DPA_ASSERT(rcr->pmode == bm_rcr_pce);
3217 + bm_cl_invalidate(RCR_PI);
3218 + bm_cl_touch_rw(RCR_PI);
3219 +}
3220 +
3221 +static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
3222 +{
3223 + register struct bm_rcr *rcr = &portal->rcr;
3224 + DPA_ASSERT(rcr->busy);
3225 + DPA_ASSERT(rcr->pmode == bm_rcr_pce);
3226 + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
3227 + RCR_INC(rcr);
3228 + rcr->available--;
3229 + lwsync();
3230 + bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
3231 +#ifdef CONFIG_FSL_DPA_CHECKING
3232 + rcr->busy = 0;
3233 +#endif
3234 +}
3235 +
3236 +static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
3237 +{
3238 + register struct bm_rcr *rcr = &portal->rcr;
3239 + struct bm_rcr_entry *rcursor;
3240 + DPA_ASSERT(rcr->busy);
3241 + DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
3242 + lwsync();
3243 + rcursor = rcr->cursor;
3244 + rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
3245 + dcbf_64(rcursor);
3246 + RCR_INC(rcr);
3247 + rcr->available--;
3248 +#ifdef CONFIG_FSL_DPA_CHECKING
3249 + rcr->busy = 0;
3250 +#endif
3251 +}
3252 +
3253 +static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
3254 +{
3255 + register struct bm_rcr *rcr = &portal->rcr;
3256 + u8 diff, old_ci = rcr->ci;
3257 + DPA_ASSERT(rcr->cmode == bm_rcr_cci);
3258 + rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
3259 + diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
3260 + rcr->available += diff;
3261 + return diff;
3262 +}
3263 +
3264 +static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
3265 +{
3266 + __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
3267 + DPA_ASSERT(rcr->cmode == bm_rcr_cce);
3268 + bm_cl_touch_ro(RCR_CI);
3269 +}
3270 +
3271 +static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
3272 +{
3273 + register struct bm_rcr *rcr = &portal->rcr;
3274 + u8 diff, old_ci = rcr->ci;
3275 + DPA_ASSERT(rcr->cmode == bm_rcr_cce);
3276 + rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
3277 + bm_cl_invalidate(RCR_CI);
3278 + diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
3279 + rcr->available += diff;
3280 + return diff;
3281 +}
3282 +
3283 +static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
3284 +{
3285 + register struct bm_rcr *rcr = &portal->rcr;
3286 + return rcr->ithresh;
3287 +}
3288 +
3289 +static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
3290 +{
3291 + register struct bm_rcr *rcr = &portal->rcr;
3292 + rcr->ithresh = ithresh;
3293 + bm_out(RCR_ITR, ithresh);
3294 +}
3295 +
3296 +static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
3297 +{
3298 + register struct bm_rcr *rcr = &portal->rcr;
3299 + return rcr->available;
3300 +}
3301 +
3302 +static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
3303 +{
3304 + register struct bm_rcr *rcr = &portal->rcr;
3305 + return BM_RCR_SIZE - 1 - rcr->available;
3306 +}
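
The ring accounting above hinges on a cyclic difference: available starts at BM_RCR_SIZE - 1 minus the distance from CI to PI, and bm_rcr_get_fill() is its complement. Below is a minimal userspace sketch of that arithmetic, not part of this patch; the helper assumes bm_cyc_diff() has the usual power-of-two form, while the real definition lives elsewhere in the driver.

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8u /* stand-in for BM_RCR_SIZE (a power of two) */

/* cyclic distance from 'first' to 'last' on a ring of 'ringsize' slots */
static unsigned cyc_diff(unsigned ringsize, unsigned first, unsigned last)
{
	return (last - first) & (ringsize - 1);
}

int main(void)
{
	unsigned pi = 5, ci = 2;
	/* one slot stays unused so "full" and "empty" are distinguishable */
	unsigned available = RING_SIZE - 1 - cyc_diff(RING_SIZE, ci, pi);
	unsigned fill = RING_SIZE - 1 - available;

	printf("pi=%u ci=%u available=%u fill=%u\n", pi, ci, available, fill);
	assert(fill == cyc_diff(RING_SIZE, ci, pi));
	return 0;
}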
3307 +
3308 +
3309 +/* ------------------------------ */
3310 +/* --- Management command API --- */
3311 +
3312 +static inline int bm_mc_init(struct bm_portal *portal)
3313 +{
3314 + register struct bm_mc *mc = &portal->mc;
3315 + mc->cr = portal->addr.addr_ce + BM_CL_CR;
3316 + mc->rr = portal->addr.addr_ce + BM_CL_RR0;
3317 + mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
3318 + BM_MCC_VERB_VBIT) ? 0 : 1;
3319 + mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
3320 +#ifdef CONFIG_FSL_DPA_CHECKING
3321 + mc->state = mc_idle;
3322 +#endif
3323 + return 0;
3324 +}
3325 +
3326 +static inline void bm_mc_finish(struct bm_portal *portal)
3327 +{
3328 + __maybe_unused register struct bm_mc *mc = &portal->mc;
3329 + DPA_ASSERT(mc->state == mc_idle);
3330 +#ifdef CONFIG_FSL_DPA_CHECKING
3331 + if (mc->state != mc_idle)
3332 + pr_crit("Losing incomplete MC command\n");
3333 +#endif
3334 +}
3335 +
3336 +static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
3337 +{
3338 + register struct bm_mc *mc = &portal->mc;
3339 + DPA_ASSERT(mc->state == mc_idle);
3340 +#ifdef CONFIG_FSL_DPA_CHECKING
3341 + mc->state = mc_user;
3342 +#endif
3343 +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
3344 + dcbz_64(mc->cr);
3345 +#endif
3346 + return mc->cr;
3347 +}
3348 +
3349 +static inline void bm_mc_abort(struct bm_portal *portal)
3350 +{
3351 + __maybe_unused register struct bm_mc *mc = &portal->mc;
3352 + DPA_ASSERT(mc->state == mc_user);
3353 +#ifdef CONFIG_FSL_DPA_CHECKING
3354 + mc->state = mc_idle;
3355 +#endif
3356 +}
3357 +
3358 +static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
3359 +{
3360 + register struct bm_mc *mc = &portal->mc;
3361 + struct bm_mc_result *rr = mc->rr + mc->rridx;
3362 + DPA_ASSERT(mc->state == mc_user);
3363 + lwsync();
3364 + mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
3365 + dcbf(mc->cr);
3366 + dcbit_ro(rr);
3367 +#ifdef CONFIG_FSL_DPA_CHECKING
3368 + mc->state = mc_hw;
3369 +#endif
3370 +}
3371 +
3372 +static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
3373 +{
3374 + register struct bm_mc *mc = &portal->mc;
3375 + struct bm_mc_result *rr = mc->rr + mc->rridx;
3376 + DPA_ASSERT(mc->state == mc_hw);
3377 + /* The inactive response register's verb byte always returns zero until
3378 + * its command is submitted and completed. This includes the valid-bit,
3379 + * in case you were wondering... */
3380 + if (!__raw_readb(&rr->verb)) {
3381 + dcbit_ro(rr);
3382 + return NULL;
3383 + }
3384 + mc->rridx ^= 1;
3385 + mc->vbit ^= BM_MCC_VERB_VBIT;
3386 +#ifdef CONFIG_FSL_DPA_CHECKING
3387 + mc->state = mc_idle;
3388 +#endif
3389 + return rr;
3390 +}
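
The management-command interface ping-pongs between two response registers and flips the valid bit on every command, so a fresh response can be told apart from a stale one by its verb byte. Below is a rough userspace sketch of that toggling, not part of this patch; in the driver the two halves live in bm_mc_commit() and bm_mc_result(), here they are collapsed into one helper for brevity.

#include <stdio.h>

#define VERB_VBIT 0x80u /* stand-in for BM_MCC_VERB_VBIT */

struct mc_state {
	unsigned rridx; /* which of the two response registers is live next */
	unsigned vbit;  /* valid bit stamped into the next command verb */
};

/* stamp a command verb, then advance to the other response register */
static unsigned mc_issue(struct mc_state *mc, unsigned myverb)
{
	unsigned verb = myverb | mc->vbit;

	mc->rridx ^= 1;
	mc->vbit ^= VERB_VBIT;
	return verb;
}

int main(void)
{
	struct mc_state mc = { .rridx = 0, .vbit = VERB_VBIT };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned verb = mc_issue(&mc, 0x10 /* arbitrary command code */);

		printf("cmd %d: verb=0x%02x, next response register=%u\n",
		       i, verb, mc.rridx);
	}
	return 0;
}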
3391 +
3392 +
3393 +/* ------------------------------------- */
3394 +/* --- Portal interrupt register API --- */
3395 +
3396 +static inline int bm_isr_init(__always_unused struct bm_portal *portal)
3397 +{
3398 + return 0;
3399 +}
3400 +
3401 +static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
3402 +{
3403 +}
3404 +
3405 +#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
3406 +#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31))
3407 +static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
3408 + int enable)
3409 +{
3410 + u32 val;
3411 + DPA_ASSERT(bpid < bman_pool_max);
3412 + /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
3413 + val = __bm_in(&portal->addr, SCN_REG(bpid));
3414 + if (enable)
3415 + val |= SCN_BIT(bpid);
3416 + else
3417 + val &= ~SCN_BIT(bpid);
3418 + __bm_out(&portal->addr, SCN_REG(bpid), val);
3419 +}
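
As the comment notes, the state-change mask for a pool lives in one of two 32-bit registers selected by bpid/32, with the bit counted down from the MSB. Below is a standalone check of that mapping, not part of this patch; the 0x3000 register base is invented for the printout, while the +4 spacing between the two registers follows the comment above.

#include <stdio.h>

/* same shape as SCN_REG()/SCN_BIT() above, with an invented register base */
#define FAKE_REG_SCN_BASE  0x3000u
#define SCN_REG_OFF(bpid)  (FAKE_REG_SCN_BASE + 4u * ((bpid) / 32))
#define SCN_BIT_MASK(bpid) (0x80000000u >> ((bpid) & 31))

int main(void)
{
	unsigned bpids[] = { 0, 5, 31, 32, 40, 63 };
	unsigned i;

	for (i = 0; i < sizeof(bpids) / sizeof(bpids[0]); i++)
		printf("bpid %2u -> SCN register 0x%04x, bit mask 0x%08x\n",
		       bpids[i], SCN_REG_OFF(bpids[i]), SCN_BIT_MASK(bpids[i]));
	return 0;
}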
3420 +
3421 +static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
3422 +{
3423 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
3424 + return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
3425 +#else
3426 + return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
3427 +#endif
3428 +}
3429 +
3430 +static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
3431 + u32 val)
3432 +{
3433 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
3434 + __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
3435 +#else
3436 + __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
3437 +#endif
3438 +}
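
The only difference between the two branches above is register spacing: ARM/ARM64 portals lay the four interrupt registers out 64 bytes apart (n << 6), while PowerPC packs them at a 4-byte stride (n << 2). Below is a throwaway sketch printing the resulting offsets, not part of this patch; the 0x0e00 value standing in for BM_REG_ISR is invented for illustration only.

#include <stdio.h>

enum bm_isr_reg {
	bm_isr_status = 0,
	bm_isr_enable = 1,
	bm_isr_disable = 2,
	bm_isr_inhibit = 3
};

#define FAKE_BM_REG_ISR 0x0e00u /* invented base, for illustration only */

int main(void)
{
	int n;

	for (n = bm_isr_status; n <= bm_isr_inhibit; n++)
		printf("isr reg %d: ARM offset 0x%04x, PPC offset 0x%04x\n",
		       n, FAKE_BM_REG_ISR + ((unsigned)n << 6),
		       FAKE_BM_REG_ISR + ((unsigned)n << 2));
	return 0;
}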
3439 +
3440 +/* Buffer Pool Cleanup */
3441 +static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
3442 +{
3443 + struct bm_mc_command *bm_cmd;
3444 + struct bm_mc_result *bm_res;
3445 +
3446 + int aq_count = 0;
3447 + bool stop = false;
3448 + while (!stop) {
3449 + /* Acquire buffers until empty */
3450 + bm_cmd = bm_mc_start(p);
3451 + bm_cmd->acquire.bpid = bpid;
3452 + bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
3453 + while (!(bm_res = bm_mc_result(p)))
3454 + cpu_relax();
3455 + if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
3456 + /* Pool is empty */
3457 +			/* TBD: Should we do a few extra iterations in
3458 +			   case some other blocks keep buffers 'on deck',
3459 +			   which may also be problematic */
3460 + stop = true;
3461 + } else
3462 + ++aq_count;
3463 + }
3464 + return 0;
3465 +}
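
bm_shutdown_pool() drains a pool by issuing single-buffer acquire commands (the "| 1" in the verb) until the response reports a buffer count of zero. Below is a simplified userspace model of that loop, not part of this patch, with an array standing in for the hardware pool.

#include <stdio.h>

/* pretend pool: number of buffers left per bpid */
static int fake_pool[64] = { [7] = 5 };

/* stand-in for one BM_MCC_VERB_CMD_ACQUIRE|1 command plus its BUFCOUNT reply */
static int fake_acquire_one(unsigned bpid)
{
	if (!fake_pool[bpid])
		return 0;	/* response buffer count == 0: pool is empty */
	fake_pool[bpid]--;
	return 1;		/* one buffer handed back */
}

static int drain_pool(unsigned bpid)
{
	int aq_count = 0;

	while (fake_acquire_one(bpid))
		aq_count++;
	return aq_count;
}

int main(void)
{
	printf("drained %d buffers from bpid 7\n", drain_pool(7));
	return 0;
}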
3466 --- /dev/null
3467 +++ b/drivers/staging/fsl_qbman/bman_private.h
3468 @@ -0,0 +1,166 @@
3469 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
3470 + *
3471 + * Redistribution and use in source and binary forms, with or without
3472 + * modification, are permitted provided that the following conditions are met:
3473 + * * Redistributions of source code must retain the above copyright
3474 + * notice, this list of conditions and the following disclaimer.
3475 + * * Redistributions in binary form must reproduce the above copyright
3476 + * notice, this list of conditions and the following disclaimer in the
3477 + * documentation and/or other materials provided with the distribution.
3478 + * * Neither the name of Freescale Semiconductor nor the
3479 + * names of its contributors may be used to endorse or promote products
3480 + * derived from this software without specific prior written permission.
3481 + *
3482 + *
3483 + * ALTERNATIVELY, this software may be distributed under the terms of the
3484 + * GNU General Public License ("GPL") as published by the Free Software
3485 + * Foundation, either version 2 of that License or (at your option) any
3486 + * later version.
3487 + *
3488 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3489 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3490 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3491 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3492 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3493 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3494 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3495 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3496 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3497 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3498 + */
3499 +
3500 +#include "dpa_sys.h"
3501 +#include <linux/fsl_bman.h>
3502 +
3503 +/* Revision info (for errata and feature handling) */
3504 +#define BMAN_REV10 0x0100
3505 +#define BMAN_REV20 0x0200
3506 +#define BMAN_REV21 0x0201
3507 +#define QBMAN_ANY_PORTAL_IDX 0xffffffff
3508 +extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
3509 +
3510 +/*
3511 + * Global variable for the max portal/pool number supported by this Bman version
3512 + */
3513 +extern u16 bman_pool_max;
3514 +
3515 +/* used by CCSR and portal interrupt code */
3516 +enum bm_isr_reg {
3517 + bm_isr_status = 0,
3518 + bm_isr_enable = 1,
3519 + bm_isr_disable = 2,
3520 + bm_isr_inhibit = 3
3521 +};
3522 +
3523 +struct bm_portal_config {
3524 + /* Corenet portal addresses;
3525 + * [0]==cache-enabled, [1]==cache-inhibited. */
3526 + __iomem void *addr_virt[2];
3527 + struct resource addr_phys[2];
3528 + /* Allow these to be joined in lists */
3529 + struct list_head list;
3530 + /* User-visible portal configuration settings */
3531 + struct bman_portal_config public_cfg;
3532 + /* power management saved data */
3533 + u32 saved_isdr;
3534 +};
3535 +
3536 +#ifdef CONFIG_FSL_BMAN_CONFIG
3537 +/* Hooks from bman_driver.c to bman_config.c */
3538 +int bman_init_ccsr(struct device_node *node);
3539 +#endif
3540 +
3541 +/* Hooks from bman_driver.c in to bman_high.c */
3542 +struct bman_portal *bman_create_portal(
3543 + struct bman_portal *portal,
3544 + const struct bm_portal_config *config);
3545 +struct bman_portal *bman_create_affine_portal(
3546 + const struct bm_portal_config *config);
3547 +struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
3548 + int cpu);
3549 +void bman_destroy_portal(struct bman_portal *bm);
3550 +
3551 +const struct bm_portal_config *bman_destroy_affine_portal(void);
3552 +
3553 +/* Hooks from fsl_usdpaa.c to bman_driver.c */
3554 +struct bm_portal_config *bm_get_unused_portal(void);
3555 +struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
3556 +void bm_put_unused_portal(struct bm_portal_config *pcfg);
3557 +void bm_set_liodns(struct bm_portal_config *pcfg);
3558 +
3559 +/* Pool logic in the portal driver, during initialisation, needs to know if
3560 + * there's access to CCSR or not (if not, it'll cripple the pool allocator). */
3561 +#ifdef CONFIG_FSL_BMAN_CONFIG
3562 +int bman_have_ccsr(void);
3563 +#else
3564 +#define bman_have_ccsr() 0
3565 +#endif
3566 +
3567 +/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
3568 + * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
3569 + * might fail (if the buffer pool is depleted). So this value provides some
3570 + * "stagger" in that the bman_acquire() function will only fail if lots of bufs
3571 + * are requested at once or if h/w has been tested a couple of times without
3572 + * luck. The _HIGH value: when bman_release() is called and the stockpile
3573 + * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
3574 + * the release ring is full). So this value provides some "stagger" so that
3575 + * ring-access is retried a couple of times prior to the API returning a
3576 + * failure. The following *must* be true;
3577 + * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
3578 + * (to avoid thrashing)
3579 + * BMAN_STOCKPILE_SZ >= 16
3580 + * (as the release logic expects to either send 8 buffers to hw prior to
3581 + * adding the given buffers to the stockpile or add the buffers to the
3582 + * stockpile before sending 8 to hw, as the API must be an all-or-nothing
3583 + * success/fail.)
3584 + */
3585 +#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
3586 +#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
3587 +#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
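
The stockpile policy these constants describe is: acquires fall back to hardware only once the per-pool cache drops to BMAN_STOCKPILE_LOW, and releases spill to hardware only once it reaches BMAN_STOCKPILE_HIGH. Below is a rough sketch of that decision logic and of the two build-time invariants, not the driver's actual stockpile code.

#include <assert.h>
#include <stdio.h>

#define STOCKPILE_SZ   16u
#define STOCKPILE_LOW   2u
#define STOCKPILE_HIGH 14u

struct stockpile { unsigned fill; };

/* returns 1 if an acquire should try hardware first, 0 if the cache suffices */
static int acquire_needs_hw(const struct stockpile *s)
{
	return s->fill <= STOCKPILE_LOW;
}

/* returns 1 if a release should try to flush buffers to hardware */
static int release_needs_hw(const struct stockpile *s)
{
	return s->fill >= STOCKPILE_HIGH;
}

int main(void)
{
	struct stockpile s = { .fill = 8 };

	/* the build-time rules spelled out in the comment above */
	assert(STOCKPILE_HIGH - STOCKPILE_LOW > 8);
	assert(STOCKPILE_SZ >= 16);

	printf("fill=%u: acquire hits hw? %d, release hits hw? %d\n",
	       s.fill, acquire_needs_hw(&s), release_needs_hw(&s));
	return 0;
}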
3588 +
3589 +/*************************************************/
3590 +/* BMan s/w corenet portal, low-level i/face */
3591 +/*************************************************/
3592 +
3593 +/* Used by all portal interrupt registers except 'inhibit'
3594 + * This mask contains all the "irqsource" bits visible to API users
3595 + */
3596 +#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
3597 +
3598 +/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
3599 + * the disable register" rather than "disable the ability to write". */
3600 +#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
3601 +#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
3602 +#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
3603 +#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
3604 +#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
3605 +#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
3606 +#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
3607 +#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
3608 +
3609 +#ifdef CONFIG_FSL_BMAN_CONFIG
3610 +/* Set depletion thresholds associated with a buffer pool. Requires that the
3611 + * operating system have access to Bman CCSR (ie. compiled in support and
3612 + * run-time access courtesy of the device-tree). */
3613 +int bm_pool_set(u32 bpid, const u32 *thresholds);
3614 +#define BM_POOL_THRESH_SW_ENTER 0
3615 +#define BM_POOL_THRESH_SW_EXIT 1
3616 +#define BM_POOL_THRESH_HW_ENTER 2
3617 +#define BM_POOL_THRESH_HW_EXIT 3
3618 +
3619 +/* Read the free buffer count for a given buffer pool */
3620 +u32 bm_pool_free_buffers(u32 bpid);
3621 +
3622 +__init int bman_init(void);
3623 +__init int bman_resource_init(void);
3624 +
3625 +const struct bm_portal_config *bman_get_bm_portal_config(
3626 + struct bman_portal *portal);
3627 +
3628 +/* power management */
3629 +#ifdef CONFIG_SUSPEND
3630 +void suspend_unused_bportal(void);
3631 +void resume_unused_bportal(void);
3632 +#endif
3633 +
3634 +#endif /* CONFIG_FSL_BMAN_CONFIG */
3635 --- /dev/null
3636 +++ b/drivers/staging/fsl_qbman/bman_test.c
3637 @@ -0,0 +1,56 @@
3638 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
3639 + *
3640 + * Redistribution and use in source and binary forms, with or without
3641 + * modification, are permitted provided that the following conditions are met:
3642 + * * Redistributions of source code must retain the above copyright
3643 + * notice, this list of conditions and the following disclaimer.
3644 + * * Redistributions in binary form must reproduce the above copyright
3645 + * notice, this list of conditions and the following disclaimer in the
3646 + * documentation and/or other materials provided with the distribution.
3647 + * * Neither the name of Freescale Semiconductor nor the
3648 + * names of its contributors may be used to endorse or promote products
3649 + * derived from this software without specific prior written permission.
3650 + *
3651 + *
3652 + * ALTERNATIVELY, this software may be distributed under the terms of the
3653 + * GNU General Public License ("GPL") as published by the Free Software
3654 + * Foundation, either version 2 of that License or (at your option) any
3655 + * later version.
3656 + *
3657 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3658 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3659 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3660 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3661 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3662 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3663 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3664 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3665 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3666 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3667 + */
3668 +
3669 +#include "bman_test.h"
3670 +
3671 +MODULE_AUTHOR("Geoff Thorpe");
3672 +MODULE_LICENSE("Dual BSD/GPL");
3673 +MODULE_DESCRIPTION("Bman testing");
3674 +
3675 +static int test_init(void)
3676 +{
3677 +#ifdef CONFIG_FSL_BMAN_TEST_HIGH
3678 + int loop = 1;
3679 + while (loop--)
3680 + bman_test_high();
3681 +#endif
3682 +#ifdef CONFIG_FSL_BMAN_TEST_THRESH
3683 + bman_test_thresh();
3684 +#endif
3685 + return 0;
3686 +}
3687 +
3688 +static void test_exit(void)
3689 +{
3690 +}
3691 +
3692 +module_init(test_init);
3693 +module_exit(test_exit);
3694 --- /dev/null
3695 +++ b/drivers/staging/fsl_qbman/bman_test.h
3696 @@ -0,0 +1,44 @@
3697 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
3698 + *
3699 + * Redistribution and use in source and binary forms, with or without
3700 + * modification, are permitted provided that the following conditions are met:
3701 + * * Redistributions of source code must retain the above copyright
3702 + * notice, this list of conditions and the following disclaimer.
3703 + * * Redistributions in binary form must reproduce the above copyright
3704 + * notice, this list of conditions and the following disclaimer in the
3705 + * documentation and/or other materials provided with the distribution.
3706 + * * Neither the name of Freescale Semiconductor nor the
3707 + * names of its contributors may be used to endorse or promote products
3708 + * derived from this software without specific prior written permission.
3709 + *
3710 + *
3711 + * ALTERNATIVELY, this software may be distributed under the terms of the
3712 + * GNU General Public License ("GPL") as published by the Free Software
3713 + * Foundation, either version 2 of that License or (at your option) any
3714 + * later version.
3715 + *
3716 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3717 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3718 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3719 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3720 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3721 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3722 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3723 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3724 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3725 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3726 + */
3727 +
3728 +#include <linux/kernel.h>
3729 +#include <linux/errno.h>
3730 +#include <linux/io.h>
3731 +#include <linux/slab.h>
3732 +#include <linux/module.h>
3733 +#include <linux/interrupt.h>
3734 +#include <linux/delay.h>
3735 +#include <linux/kthread.h>
3736 +
3737 +#include <linux/fsl_bman.h>
3738 +
3739 +void bman_test_high(void);
3740 +void bman_test_thresh(void);
3741 --- /dev/null
3742 +++ b/drivers/staging/fsl_qbman/bman_test_high.c
3743 @@ -0,0 +1,183 @@
3744 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
3745 + *
3746 + * Redistribution and use in source and binary forms, with or without
3747 + * modification, are permitted provided that the following conditions are met:
3748 + * * Redistributions of source code must retain the above copyright
3749 + * notice, this list of conditions and the following disclaimer.
3750 + * * Redistributions in binary form must reproduce the above copyright
3751 + * notice, this list of conditions and the following disclaimer in the
3752 + * documentation and/or other materials provided with the distribution.
3753 + * * Neither the name of Freescale Semiconductor nor the
3754 + * names of its contributors may be used to endorse or promote products
3755 + * derived from this software without specific prior written permission.
3756 + *
3757 + *
3758 + * ALTERNATIVELY, this software may be distributed under the terms of the
3759 + * GNU General Public License ("GPL") as published by the Free Software
3760 + * Foundation, either version 2 of that License or (at your option) any
3761 + * later version.
3762 + *
3763 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3764 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3765 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3766 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3767 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3768 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3769 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3770 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3771 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3772 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3773 + */
3774 +
3775 +#include "bman_test.h"
3776 +#include "bman_private.h"
3777 +
3778 +/*************/
3779 +/* constants */
3780 +/*************/
3781 +
3782 +#define PORTAL_OPAQUE ((void *)0xf00dbeef)
3783 +#define POOL_OPAQUE ((void *)0xdeadabba)
3784 +#define NUM_BUFS 93
3785 +#define LOOPS 3
3786 +#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
3787 +
3788 +/***************/
3789 +/* global vars */
3790 +/***************/
3791 +
3792 +static struct bman_pool *pool;
3793 +static int depleted;
3794 +static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
3795 +static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
3796 +static int bufs_received;
3797 +
3798 +/* Predeclare the callback so we can instantiate pool parameters */
3799 +static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
3800 +
3801 +/**********************/
3802 +/* internal functions */
3803 +/**********************/
3804 +
3805 +static void bufs_init(void)
3806 +{
3807 + int i;
3808 + for (i = 0; i < NUM_BUFS; i++)
3809 + bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
3810 + bufs_received = 0;
3811 +}
3812 +
3813 +static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
3814 +{
3815 + if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
3816 +
3817 +	/* On SoCs with Bman revision 2.0, Bman only respects the 40
3818 + * LS-bits of buffer addresses, masking off the upper 8-bits on
3819 + * release commands. The API provides for 48-bit addresses
3820 + * because some SoCs support all 48-bits. When generating
3821 + * garbage addresses for testing, we either need to zero the
3822 + * upper 8-bits when releasing to Bman (otherwise we'll be
3823 + * disappointed when the buffers we acquire back from Bman
3824 + * don't match), or we need to mask the upper 8-bits off when
3825 + * comparing. We do the latter.
3826 + */
3827 + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
3828 + < (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
3829 + return -1;
3830 + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
3831 + > (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
3832 + return 1;
3833 + } else {
3834 + if (bm_buffer_get64(a) < bm_buffer_get64(b))
3835 + return -1;
3836 + if (bm_buffer_get64(a) > bm_buffer_get64(b))
3837 + return 1;
3838 + }
3839 +
3840 + return 0;
3841 +}
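
The masked comparison works because truncating both values to the low 40 bits (BMAN_TOKEN_MASK) hides whatever a rev 2.0/2.1 Bman dropped from the upper byte. Below is a tiny standalone demonstration with an arbitrary 48-bit value, not part of this patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOKEN_MASK 0x00FFFFFFFFFFULL /* low 40 bits, as in BMAN_TOKEN_MASK */

int main(void)
{
	uint64_t released = 0xA5FEDC012345ULL;     /* arbitrary 48-bit address */
	uint64_t acquired = released & TOKEN_MASK; /* what rev 2.0 hands back */

	printf("released=0x%012" PRIx64 " acquired=0x%012" PRIx64 "\n",
	       released, acquired);
	printf("raw compare equal?    %d\n", released == acquired);
	printf("masked compare equal? %d\n",
	       (released & TOKEN_MASK) == (acquired & TOKEN_MASK));
	return 0;
}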
3842 +
3843 +static void bufs_confirm(void)
3844 +{
3845 + int i, j;
3846 + for (i = 0; i < NUM_BUFS; i++) {
3847 + int matches = 0;
3848 + for (j = 0; j < NUM_BUFS; j++)
3849 + if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
3850 + matches++;
3851 + BUG_ON(matches != 1);
3852 + }
3853 +}
3854 +
3855 +/********/
3856 +/* test */
3857 +/********/
3858 +
3859 +static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
3860 + void *pool_ctx, int __depleted)
3861 +{
3862 + BUG_ON(__pool != pool);
3863 + BUG_ON(pool_ctx != POOL_OPAQUE);
3864 + depleted = __depleted;
3865 +}
3866 +
3867 +void bman_test_high(void)
3868 +{
3869 + struct bman_pool_params pparams = {
3870 + .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
3871 + .cb = depletion_cb,
3872 + .cb_ctx = POOL_OPAQUE,
3873 + };
3874 + int i, loops = LOOPS;
3875 + struct bm_buffer tmp_buf;
3876 +
3877 + bufs_init();
3878 +
3879 + pr_info("BMAN: --- starting high-level test ---\n");
3880 +
3881 + pool = bman_new_pool(&pparams);
3882 + BUG_ON(!pool);
3883 +
3884 + /*******************/
3885 + /* Release buffers */
3886 + /*******************/
3887 +do_loop:
3888 + i = 0;
3889 + while (i < NUM_BUFS) {
3890 + u32 flags = BMAN_RELEASE_FLAG_WAIT;
3891 + int num = 8;
3892 + if ((i + num) > NUM_BUFS)
3893 + num = NUM_BUFS - i;
3894 + if ((i + num) == NUM_BUFS)
3895 + flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
3896 + if (bman_release(pool, bufs_in + i, num, flags))
3897 + panic("bman_release() failed\n");
3898 + i += num;
3899 + }
3900 +
3901 + /*******************/
3902 + /* Acquire buffers */
3903 + /*******************/
3904 + while (i > 0) {
3905 + int tmp, num = 8;
3906 + if (num > i)
3907 + num = i;
3908 + tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
3909 + BUG_ON(tmp != num);
3910 + i -= num;
3911 + }
3912 +
3913 + i = bman_acquire(pool, &tmp_buf, 1, 0);
3914 + BUG_ON(i > 0);
3915 +
3916 + bufs_confirm();
3917 +
3918 + if (--loops)
3919 + goto do_loop;
3920 +
3921 + /************/
3922 + /* Clean up */
3923 + /************/
3924 + bman_free_pool(pool);
3925 + pr_info("BMAN: --- finished high-level test ---\n");
3926 +}
3927 --- /dev/null
3928 +++ b/drivers/staging/fsl_qbman/bman_test_thresh.c
3929 @@ -0,0 +1,196 @@
3930 +/* Copyright 2010-2011 Freescale Semiconductor, Inc.
3931 + *
3932 + * Redistribution and use in source and binary forms, with or without
3933 + * modification, are permitted provided that the following conditions are met:
3934 + * * Redistributions of source code must retain the above copyright
3935 + * notice, this list of conditions and the following disclaimer.
3936 + * * Redistributions in binary form must reproduce the above copyright
3937 + * notice, this list of conditions and the following disclaimer in the
3938 + * documentation and/or other materials provided with the distribution.
3939 + * * Neither the name of Freescale Semiconductor nor the
3940 + * names of its contributors may be used to endorse or promote products
3941 + * derived from this software without specific prior written permission.
3942 + *
3943 + *
3944 + * ALTERNATIVELY, this software may be distributed under the terms of the
3945 + * GNU General Public License ("GPL") as published by the Free Software
3946 + * Foundation, either version 2 of that License or (at your option) any
3947 + * later version.
3948 + *
3949 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3950 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3951 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3952 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3953 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3954 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3955 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3956 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3957 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3958 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3959 + */
3960 +
3961 +#include "bman_test.h"
3962 +
3963 +/* Test constants */
3964 +#define TEST_NUMBUFS 129728
3965 +#define TEST_EXIT 129536
3966 +#define TEST_ENTRY 129024
3967 +
3968 +struct affine_test_data {
3969 + struct task_struct *t;
3970 + int cpu;
3971 + int expect_affinity;
3972 + int drain;
3973 + int num_enter;
3974 + int num_exit;
3975 + struct list_head node;
3976 + struct completion wakethread;
3977 + struct completion wakeparent;
3978 +};
3979 +
3980 +static void cb_depletion(struct bman_portal *portal,
3981 + struct bman_pool *pool,
3982 + void *opaque,
3983 + int depleted)
3984 +{
3985 + struct affine_test_data *data = opaque;
3986 + int c = smp_processor_id();
3987 + pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n",
3988 + bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
3989 +	/* We should be executing on the CPU of the thread that owns the pool
3990 +	 * if and only if that CPU has an affine portal (ie. it isn't slaved). */
3991 + BUG_ON((c != data->cpu) && data->expect_affinity);
3992 + BUG_ON((c == data->cpu) && !data->expect_affinity);
3993 + if (depleted)
3994 + data->num_enter++;
3995 + else
3996 + data->num_exit++;
3997 +}
3998 +
3999 +/* Params used to set up a pool; this also dynamically allocates a BPID */
4000 +static const struct bman_pool_params params_nocb = {
4001 + .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
4002 + .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
4003 +};
4004 +
4005 +/* Params used to set up each cpu's pool with callbacks enabled */
4006 +static struct bman_pool_params params_cb = {
4007 + .bpid = 0, /* will be replaced to match pool_nocb */
4008 + .flags = BMAN_POOL_FLAG_DEPLETION,
4009 + .cb = cb_depletion
4010 +};
4011 +
4012 +static struct bman_pool *pool_nocb;
4013 +static LIST_HEAD(threads);
4014 +
4015 +static int affine_test(void *__data)
4016 +{
4017 + struct bman_pool *pool;
4018 + struct affine_test_data *data = __data;
4019 + struct bman_pool_params my_params = params_cb;
4020 +
4021 + pr_info("thread %d: starting\n", data->cpu);
4022 + /* create the pool */
4023 + my_params.cb_ctx = data;
4024 + pool = bman_new_pool(&my_params);
4025 + BUG_ON(!pool);
4026 + complete(&data->wakeparent);
4027 + wait_for_completion(&data->wakethread);
4028 + init_completion(&data->wakethread);
4029 +
4030 + /* if we're the drainer, we get signalled for that */
4031 + if (data->drain) {
4032 + struct bm_buffer buf;
4033 + int ret;
4034 + pr_info("thread %d: draining...\n", data->cpu);
4035 + do {
4036 + ret = bman_acquire(pool, &buf, 1, 0);
4037 + } while (ret > 0);
4038 + pr_info("thread %d: draining done.\n", data->cpu);
4039 + complete(&data->wakeparent);
4040 + wait_for_completion(&data->wakethread);
4041 + init_completion(&data->wakethread);
4042 + }
4043 +
4044 + /* cleanup */
4045 + bman_free_pool(pool);
4046 + while (!kthread_should_stop())
4047 + cpu_relax();
4048 + pr_info("thread %d: exiting\n", data->cpu);
4049 + return 0;
4050 +}
4051 +
4052 +static struct affine_test_data *start_affine_test(int cpu, int drain)
4053 +{
4054 + struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
4055 +
4056 + if (!data)
4057 + return NULL;
4058 + data->cpu = cpu;
4059 + data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
4060 + data->drain = drain;
4061 + data->num_enter = 0;
4062 + data->num_exit = 0;
4063 + init_completion(&data->wakethread);
4064 + init_completion(&data->wakeparent);
4065 + list_add_tail(&data->node, &threads);
4066 + data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
4067 + BUG_ON(IS_ERR(data->t));
4068 + kthread_bind(data->t, cpu);
4069 + wake_up_process(data->t);
4070 + return data;
4071 +}
4072 +
4073 +void bman_test_thresh(void)
4074 +{
4075 + int loop = TEST_NUMBUFS;
4076 + int ret, num_cpus = 0;
4077 + struct affine_test_data *data, *drainer = NULL;
4078 +
4079 + pr_info("bman_test_thresh: start\n");
4080 +
4081 + /* allocate a BPID and seed it */
4082 + pool_nocb = bman_new_pool(&params_nocb);
4083 + BUG_ON(!pool_nocb);
4084 + while (loop--) {
4085 + struct bm_buffer buf;
4086 + bm_buffer_set64(&buf, 0x0badbeef + loop);
4087 + ret = bman_release(pool_nocb, &buf, 1,
4088 + BMAN_RELEASE_FLAG_WAIT);
4089 + BUG_ON(ret);
4090 + }
4091 + while (!bman_rcr_is_empty())
4092 + cpu_relax();
4093 + pr_info("bman_test_thresh: buffers are in\n");
4094 +
4095 + /* create threads and wait for them to create pools */
4096 + params_cb.bpid = bman_get_params(pool_nocb)->bpid;
4097 + for_each_cpu(loop, cpu_online_mask) {
4098 + data = start_affine_test(loop, drainer ? 0 : 1);
4099 + BUG_ON(!data);
4100 + if (!drainer)
4101 + drainer = data;
4102 + num_cpus++;
4103 + wait_for_completion(&data->wakeparent);
4104 + }
4105 +
4106 + /* signal the drainer to start draining */
4107 + complete(&drainer->wakethread);
4108 + wait_for_completion(&drainer->wakeparent);
4109 + init_completion(&drainer->wakeparent);
4110 +
4111 + /* tear down */
4112 + list_for_each_entry_safe(data, drainer, &threads, node) {
4113 + complete(&data->wakethread);
4114 + ret = kthread_stop(data->t);
4115 + BUG_ON(ret);
4116 + list_del(&data->node);
4117 + /* check that we get the expected callbacks (and no others) */
4118 + BUG_ON(data->num_enter != 1);
4119 + BUG_ON(data->num_exit != 0);
4120 + kfree(data);
4121 + }
4122 + bman_free_pool(pool_nocb);
4123 +
4124 + pr_info("bman_test_thresh: done\n");
4125 +}
4126 --- /dev/null
4127 +++ b/drivers/staging/fsl_qbman/dpa_alloc.c
4128 @@ -0,0 +1,706 @@
4129 +/* Copyright 2009-2012 Freescale Semiconductor, Inc.
4130 + *
4131 + * Redistribution and use in source and binary forms, with or without
4132 + * modification, are permitted provided that the following conditions are met:
4133 + * * Redistributions of source code must retain the above copyright
4134 + * notice, this list of conditions and the following disclaimer.
4135 + * * Redistributions in binary form must reproduce the above copyright
4136 + * notice, this list of conditions and the following disclaimer in the
4137 + * documentation and/or other materials provided with the distribution.
4138 + * * Neither the name of Freescale Semiconductor nor the
4139 + * names of its contributors may be used to endorse or promote products
4140 + * derived from this software without specific prior written permission.
4141 + *
4142 + *
4143 + * ALTERNATIVELY, this software may be distributed under the terms of the
4144 + * GNU General Public License ("GPL") as published by the Free Software
4145 + * Foundation, either version 2 of that License or (at your option) any
4146 + * later version.
4147 + *
4148 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
4149 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
4150 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
4151 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
4152 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
4153 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
4154 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4155 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4156 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4157 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4158 + */
4159 +
4160 +#include "dpa_sys.h"
4161 +#include <linux/fsl_qman.h>
4162 +#include <linux/fsl_bman.h>
4163 +
4164 +/* Qman and Bman APIs are front-ends to the common code; */
4165 +
4166 +static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */
4167 +static DECLARE_DPA_ALLOC(fqalloc); /* FQID allocator */
4168 +static DECLARE_DPA_ALLOC(qpalloc); /* pool-channel allocator */
4169 +static DECLARE_DPA_ALLOC(cgralloc); /* CGR ID allocator */
4170 +static DECLARE_DPA_ALLOC(ceetm0_challoc); /* CEETM Channel ID allocator */
4171 +static DECLARE_DPA_ALLOC(ceetm0_lfqidalloc); /* CEETM LFQID allocator */
4172 +static DECLARE_DPA_ALLOC(ceetm1_challoc); /* CEETM Channel ID allocator */
4173 +static DECLARE_DPA_ALLOC(ceetm1_lfqidalloc); /* CEETM LFQID allocator */
4174 +
4175 +/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. when releasing
4176 + * FQIDs (probably from user-space), it can filter out those that aren't in the
4177 + * OOS state (better to leak a h/w resource than to crash). This function
4178 + * returns the number of invalid IDs that were not released. */
4179 +static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
4180 + int (*is_valid)(u32 id))
4181 +{
4182 + int valid_mode = 0;
4183 + u32 loop = id, total_invalid = 0;
4184 + while (loop < (id + count)) {
4185 + int isvalid = is_valid ? is_valid(loop) : 1;
4186 + if (!valid_mode) {
4187 + /* We're looking for a valid ID to terminate an invalid
4188 + * range */
4189 + if (isvalid) {
4190 + /* We finished a range of invalid IDs, a valid
4191 + * range is now underway */
4192 + valid_mode = 1;
4193 + count -= (loop - id);
4194 + id = loop;
4195 + } else
4196 + total_invalid++;
4197 + } else {
4198 + /* We're looking for an invalid ID to terminate a
4199 + * valid range */
4200 + if (!isvalid) {
4201 +				/* Release the range of valid IDs, an invalid
4202 + * range is now underway */
4203 + if (loop > id)
4204 + dpa_alloc_free(alloc, id, loop - id);
4205 + valid_mode = 0;
4206 + }
4207 + }
4208 + loop++;
4209 + }
4210 + /* Release any unterminated range of valid IDs */
4211 + if (valid_mode && count)
4212 + dpa_alloc_free(alloc, id, count);
4213 + return total_invalid;
4214 +}
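
release_id_range() walks the span once, handing maximal runs of predicate-approved IDs to dpa_alloc_free() and counting the rest as leaks. Below is a toy run of the same walk, not part of this patch, with an invented is-valid predicate and the free call replaced by a print.

#include <stdio.h>

/* invented predicate: pretend only IDs >= 13 are in a releasable state */
static int demo_is_valid(unsigned id)
{
	return id >= 13;
}

/* same walk as release_id_range(), with dpa_alloc_free() stubbed by a print */
static unsigned release_range_demo(unsigned id, unsigned count,
				   int (*is_valid)(unsigned))
{
	int valid_mode = 0;
	unsigned loop = id, total_invalid = 0;

	while (loop < (id + count)) {
		int isvalid = is_valid ? is_valid(loop) : 1;

		if (!valid_mode) {
			if (isvalid) {
				valid_mode = 1;
				count -= (loop - id);
				id = loop;
			} else
				total_invalid++;
		} else if (!isvalid) {
			if (loop > id)
				printf("free [%u..%u]\n", id, loop - 1);
			valid_mode = 0;
		}
		loop++;
	}
	if (valid_mode && count)
		printf("free [%u..%u]\n", id, id + count - 1);
	return total_invalid;
}

int main(void)
{
	/* releasing IDs 10..15: 10..12 are "invalid", 13..15 get freed */
	printf("leaked %u IDs\n", release_range_demo(10, 6, demo_is_valid));
	return 0;
}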
4215 +
4216 +/* BPID allocator front-end */
4217 +
4218 +int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
4219 +{
4220 + return dpa_alloc_new(&bpalloc, result, count, align, partial);
4221 +}
4222 +EXPORT_SYMBOL(bman_alloc_bpid_range);
4223 +
4224 +static int bp_cleanup(u32 bpid)
4225 +{
4226 + return bman_shutdown_pool(bpid) == 0;
4227 +}
4228 +void bman_release_bpid_range(u32 bpid, u32 count)
4229 +{
4230 + u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
4231 + if (total_invalid)
4232 + pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
4233 + bpid, bpid + count - 1, count, total_invalid);
4234 +}
4235 +EXPORT_SYMBOL(bman_release_bpid_range);
4236 +
4237 +void bman_seed_bpid_range(u32 bpid, u32 count)
4238 +{
4239 + dpa_alloc_seed(&bpalloc, bpid, count);
4240 +}
4241 +EXPORT_SYMBOL(bman_seed_bpid_range);
4242 +
4243 +int bman_reserve_bpid_range(u32 bpid, u32 count)
4244 +{
4245 + return dpa_alloc_reserve(&bpalloc, bpid, count);
4246 +}
4247 +EXPORT_SYMBOL(bman_reserve_bpid_range);
4248 +
4249 +
4250 +/* FQID allocator front-end */
4251 +
4252 +int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
4253 +{
4254 + return dpa_alloc_new(&fqalloc, result, count, align, partial);
4255 +}
4256 +EXPORT_SYMBOL(qman_alloc_fqid_range);
4257 +
4258 +static int fq_cleanup(u32 fqid)
4259 +{
4260 + return qman_shutdown_fq(fqid) == 0;
4261 +}
4262 +void qman_release_fqid_range(u32 fqid, u32 count)
4263 +{
4264 + u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_cleanup);
4265 + if (total_invalid)
4266 + pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
4267 + fqid, fqid + count - 1, count, total_invalid);
4268 +}
4269 +EXPORT_SYMBOL(qman_release_fqid_range);
4270 +
4271 +int qman_reserve_fqid_range(u32 fqid, u32 count)
4272 +{
4273 + return dpa_alloc_reserve(&fqalloc, fqid, count);
4274 +}
4275 +EXPORT_SYMBOL(qman_reserve_fqid_range);
4276 +
4277 +void qman_seed_fqid_range(u32 fqid, u32 count)
4278 +{
4279 + dpa_alloc_seed(&fqalloc, fqid, count);
4280 +}
4281 +EXPORT_SYMBOL(qman_seed_fqid_range);
4282 +
4283 +/* Pool-channel allocator front-end */
4284 +
4285 +int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
4286 +{
4287 + return dpa_alloc_new(&qpalloc, result, count, align, partial);
4288 +}
4289 +EXPORT_SYMBOL(qman_alloc_pool_range);
4290 +
4291 +static int qpool_cleanup(u32 qp)
4292 +{
4293 + /* We query all FQDs starting from
4294 + * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
4295 + * whose destination channel is the pool-channel being released.
4296 + * When a non-OOS FQD is found we attempt to clean it up */
4297 + struct qman_fq fq = {
4298 + .fqid = 1
4299 + };
4300 + int err;
4301 + do {
4302 + struct qm_mcr_queryfq_np np;
4303 + err = qman_query_fq_np(&fq, &np);
4304 + if (err)
4305 + /* FQID range exceeded, found no problems */
4306 + return 1;
4307 + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
4308 + struct qm_fqd fqd;
4309 + err = qman_query_fq(&fq, &fqd);
4310 + BUG_ON(err);
4311 + if (fqd.dest.channel == qp) {
4312 + /* The channel is the FQ's target, clean it */
4313 + if (qman_shutdown_fq(fq.fqid) != 0)
4314 + /* Couldn't shut down the FQ
4315 + so the pool must be leaked */
4316 + return 0;
4317 + }
4318 + }
4319 + /* Move to the next FQID */
4320 + fq.fqid++;
4321 + } while (1);
4322 +}
4323 +void qman_release_pool_range(u32 qp, u32 count)
4324 +{
4325 + u32 total_invalid = release_id_range(&qpalloc, qp,
4326 + count, qpool_cleanup);
4327 + if (total_invalid) {
4328 + /* Pool channels are almost always used individually */
4329 + if (count == 1)
4330 + pr_err("Pool channel 0x%x had %d leaks\n",
4331 + qp, total_invalid);
4332 + else
4333 + pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
4334 + qp, qp + count - 1, count, total_invalid);
4335 + }
4336 +}
4337 +EXPORT_SYMBOL(qman_release_pool_range);
4338 +
4339 +
4340 +void qman_seed_pool_range(u32 poolid, u32 count)
4341 +{
4342 + dpa_alloc_seed(&qpalloc, poolid, count);
4343 +
4344 +}
4345 +EXPORT_SYMBOL(qman_seed_pool_range);
4346 +
4347 +int qman_reserve_pool_range(u32 poolid, u32 count)
4348 +{
4349 + return dpa_alloc_reserve(&qpalloc, poolid, count);
4350 +}
4351 +EXPORT_SYMBOL(qman_reserve_pool_range);
4352 +
4353 +
4354 +/* CGR ID allocator front-end */
4355 +
4356 +int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
4357 +{
4358 + return dpa_alloc_new(&cgralloc, result, count, align, partial);
4359 +}
4360 +EXPORT_SYMBOL(qman_alloc_cgrid_range);
4361 +
4362 +static int cqr_cleanup(u32 cgrid)
4363 +{
4364 + /* We query all FQDs starting from
4365 + * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
4366 + * whose CGR is the CGR being released.
4367 + */
4368 + struct qman_fq fq = {
4369 + .fqid = 1
4370 + };
4371 + int err;
4372 + do {
4373 + struct qm_mcr_queryfq_np np;
4374 + err = qman_query_fq_np(&fq, &np);
4375 + if (err)
4376 + /* FQID range exceeded, found no problems */
4377 + return 1;
4378 + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
4379 + struct qm_fqd fqd;
4380 + err = qman_query_fq(&fq, &fqd);
4381 + BUG_ON(err);
4382 + if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
4383 + (fqd.cgid == cgrid)) {
4384 + pr_err("CRGID 0x%x is being used by FQID 0x%x,"
4385 + " CGR will be leaked\n",
4386 + cgrid, fq.fqid);
4387 + return 1;
4388 + }
4389 + }
4390 + /* Move to the next FQID */
4391 + fq.fqid++;
4392 + } while (1);
4393 +}
4394 +
4395 +void qman_release_cgrid_range(u32 cgrid, u32 count)
4396 +{
4397 + u32 total_invalid = release_id_range(&cgralloc, cgrid,
4398 + count, cqr_cleanup);
4399 + if (total_invalid)
4400 + pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
4401 + cgrid, cgrid + count - 1, count, total_invalid);
4402 +}
4403 +EXPORT_SYMBOL(qman_release_cgrid_range);
4404 +
4405 +void qman_seed_cgrid_range(u32 cgrid, u32 count)
4406 +{
4407 + dpa_alloc_seed(&cgralloc, cgrid, count);
4408 +
4409 +}
4410 +EXPORT_SYMBOL(qman_seed_cgrid_range);
4411 +
4412 +/* CEETM CHANNEL ID allocator front-end */
4413 +int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
4414 + int partial)
4415 +{
4416 + return dpa_alloc_new(&ceetm0_challoc, result, count, align, partial);
4417 +}
4418 +EXPORT_SYMBOL(qman_alloc_ceetm0_channel_range);
4419 +
4420 +int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
4421 + int partial)
4422 +{
4423 + return dpa_alloc_new(&ceetm1_challoc, result, count, align, partial);
4424 +}
4425 +EXPORT_SYMBOL(qman_alloc_ceetm1_channel_range);
4426 +
4427 +void qman_release_ceetm0_channel_range(u32 channelid, u32 count)
4428 +{
4429 + u32 total_invalid;
4430 +
4431 + total_invalid = release_id_range(&ceetm0_challoc, channelid, count,
4432 + NULL);
4433 + if (total_invalid)
4434 + pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
4435 + channelid, channelid + count - 1, count, total_invalid);
4436 +}
4437 +EXPORT_SYMBOL(qman_release_ceetm0_channel_range);
4438 +
4439 +void qman_seed_ceetm0_channel_range(u32 channelid, u32 count)
4440 +{
4441 + dpa_alloc_seed(&ceetm0_challoc, channelid, count);
4442 +
4443 +}
4444 +EXPORT_SYMBOL(qman_seed_ceetm0_channel_range);
4445 +
4446 +void qman_release_ceetm1_channel_range(u32 channelid, u32 count)
4447 +{
4448 + u32 total_invalid;
4449 + total_invalid = release_id_range(&ceetm1_challoc, channelid, count,
4450 + NULL);
4451 + if (total_invalid)
4452 + pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
4453 + channelid, channelid + count - 1, count, total_invalid);
4454 +}
4455 +EXPORT_SYMBOL(qman_release_ceetm1_channel_range);
4456 +
4457 +void qman_seed_ceetm1_channel_range(u32 channelid, u32 count)
4458 +{
4459 + dpa_alloc_seed(&ceetm1_challoc, channelid, count);
4460 +
4461 +}
4462 +EXPORT_SYMBOL(qman_seed_ceetm1_channel_range);
4463 +
4464 +/* CEETM LFQID allocator front-end */
4465 +int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
4466 + int partial)
4467 +{
4468 + return dpa_alloc_new(&ceetm0_lfqidalloc, result, count, align, partial);
4469 +}
4470 +EXPORT_SYMBOL(qman_alloc_ceetm0_lfqid_range);
4471 +
4472 +int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
4473 + int partial)
4474 +{
4475 + return dpa_alloc_new(&ceetm1_lfqidalloc, result, count, align, partial);
4476 +}
4477 +EXPORT_SYMBOL(qman_alloc_ceetm1_lfqid_range);
4478 +
4479 +void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count)
4480 +{
4481 + u32 total_invalid;
4482 +
4483 + total_invalid = release_id_range(&ceetm0_lfqidalloc, lfqid, count,
4484 + NULL);
4485 + if (total_invalid)
4486 + pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
4487 + lfqid, lfqid + count - 1, count, total_invalid);
4488 +}
4489 +EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range);
4490 +
4491 +void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count)
4492 +{
4493 + dpa_alloc_seed(&ceetm0_lfqidalloc, lfqid, count);
4494 +
4495 +}
4496 +EXPORT_SYMBOL(qman_seed_ceetm0_lfqid_range);
4497 +
4498 +void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count)
4499 +{
4500 + u32 total_invalid;
4501 +
4502 + total_invalid = release_id_range(&ceetm1_lfqidalloc, lfqid, count,
4503 + NULL);
4504 + if (total_invalid)
4505 + pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
4506 + lfqid, lfqid + count - 1, count, total_invalid);
4507 +}
4508 +EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range);
4509 +
4510 +void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count)
4511 +{
4512 + dpa_alloc_seed(&ceetm1_lfqidalloc, lfqid, count);
4513 +
4514 +}
4515 +EXPORT_SYMBOL(qman_seed_ceetm1_lfqid_range);
4516 +
4517 +
4518 +/* Everything else is the common backend to all the allocators */
4519 +
4520 +/* The allocator is a (possibly-empty) list of these; */
4521 +struct alloc_node {
4522 + struct list_head list;
4523 + u32 base;
4524 + u32 num;
4525 + /* refcount and is_alloced are only set
4526 + when the node is in the used list */
4527 + unsigned int refcount;
4528 + int is_alloced;
4529 +};
4530 +
4531 +/* #define DPA_ALLOC_DEBUG */
4532 +
4533 +#ifdef DPA_ALLOC_DEBUG
4534 +#define DPRINT pr_info
4535 +static void DUMP(struct dpa_alloc *alloc)
4536 +{
4537 + int off = 0;
4538 + char buf[256];
4539 + struct alloc_node *p;
4540 + pr_info("Free Nodes\n");
4541 + list_for_each_entry(p, &alloc->free, list) {
4542 + if (off < 255)
4543 + off += snprintf(buf + off, 255-off, "{%d,%d}",
4544 + p->base, p->base + p->num - 1);
4545 + }
4546 + pr_info("%s\n", buf);
4547 +
4548 + off = 0;
4549 + pr_info("Used Nodes\n");
4550 + list_for_each_entry(p, &alloc->used, list) {
4551 + if (off < 255)
4552 + off += snprintf(buf + off, 255-off, "{%d,%d}",
4553 + p->base, p->base + p->num - 1);
4554 + }
4555 + pr_info("%s\n", buf);
4556 +
4557 +
4558 +
4559 +}
4560 +#else
4561 +#define DPRINT(x...)
4562 +#define DUMP(a)
4563 +#endif
4564 +
4565 +int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
4566 + int partial)
4567 +{
4568 + struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL;
4569 + u32 base, next_best_base = 0, num = 0, next_best_num = 0;
4570 + struct alloc_node *margin_left, *margin_right;
4571 +
4572 + *result = (u32)-1;
4573 + DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
4574 + DUMP(alloc);
4575 + /* If 'align' is 0, it should behave as though it was 1 */
4576 + if (!align)
4577 + align = 1;
4578 + margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
4579 + if (!margin_left)
4580 + goto err;
4581 + margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
4582 + if (!margin_right) {
4583 + kfree(margin_left);
4584 + goto err;
4585 + }
4586 + spin_lock_irq(&alloc->lock);
4587 + list_for_each_entry(i, &alloc->free, list) {
4588 + base = (i->base + align - 1) / align;
4589 + base *= align;
4590 + if ((base - i->base) >= i->num)
4591 + /* alignment is impossible, regardless of count */
4592 + continue;
4593 + num = i->num - (base - i->base);
4594 + if (num >= count) {
4595 + /* this one will do nicely */
4596 + num = count;
4597 + goto done;
4598 + }
4599 + if (num > next_best_num) {
4600 + next_best = i;
4601 + next_best_base = base;
4602 + next_best_num = num;
4603 + }
4604 + }
4605 + if (partial && next_best) {
4606 + i = next_best;
4607 + base = next_best_base;
4608 + num = next_best_num;
4609 + } else
4610 + i = NULL;
4611 +done:
4612 + if (i) {
4613 + if (base != i->base) {
4614 + margin_left->base = i->base;
4615 + margin_left->num = base - i->base;
4616 + list_add_tail(&margin_left->list, &i->list);
4617 + } else
4618 + kfree(margin_left);
4619 + if ((base + num) < (i->base + i->num)) {
4620 + margin_right->base = base + num;
4621 + margin_right->num = (i->base + i->num) -
4622 + (base + num);
4623 + list_add(&margin_right->list, &i->list);
4624 + } else
4625 + kfree(margin_right);
4626 + list_del(&i->list);
4627 + kfree(i);
4628 + *result = base;
4629 + } else {
4630 + spin_unlock_irq(&alloc->lock);
4631 + kfree(margin_left);
4632 + kfree(margin_right);
4633 + }
4634 +
4635 +err:
4636 + DPRINT("returning %d\n", i ? num : -ENOMEM);
4637 + DUMP(alloc);
4638 + if (!i)
4639 + return -ENOMEM;
4640 +
4641 + /* Add the allocation to the used list with a refcount of 1 */
4642 + used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
4643 + if (!used_node) {
4644 + spin_unlock_irq(&alloc->lock);
4645 + return -ENOMEM;
4646 + }
4647 + used_node->base = *result;
4648 + used_node->num = num;
4649 + used_node->refcount = 1;
4650 + used_node->is_alloced = 1;
4651 + list_add_tail(&used_node->list, &alloc->used);
4652 + spin_unlock_irq(&alloc->lock);
4653 + return (int)num;
4654 +}
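
The heart of dpa_alloc_new() is splitting one free node into up to three pieces: a left margin below the aligned base, the allocation itself, and a right margin above it. Below is a standalone sketch of just that arithmetic, not part of this patch, with no locking or list handling.

#include <stdio.h>
#include <stdint.h>

struct range { uint32_t base, num; };

/* carve 'count' IDs aligned to 'align' out of 'free_node'; returns 0 on fit */
static int carve(struct range free_node, uint32_t count, uint32_t align,
		 struct range *left, struct range *alloc, struct range *right)
{
	uint32_t base;

	if (!align)
		align = 1;
	base = (free_node.base + align - 1) / align * align;
	if (base - free_node.base >= free_node.num)
		return -1; /* alignment impossible within this node */
	if (free_node.num - (base - free_node.base) < count)
		return -1; /* not enough room after aligning */

	*left = (struct range){ free_node.base, base - free_node.base };
	*alloc = (struct range){ base, count };
	*right = (struct range){ base + count,
				 free_node.base + free_node.num - (base + count) };
	return 0;
}

int main(void)
{
	struct range l, a, r;

	/* free IDs 30..49, ask for 8 IDs aligned to 16 */
	if (!carve((struct range){ 30, 20 }, 8, 16, &l, &a, &r))
		printf("left {%u,%u} alloc {%u,%u} right {%u,%u}\n",
		       l.base, l.num, a.base, a.num, r.base, r.num);
	return 0;
}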
4655 +
4656 +/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
4657 + * forcing error-handling on to users in the deallocation path. */
4658 +static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
4659 +{
4660 + struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
4661 + BUG_ON(!node);
4662 + DPRINT("release_range(%d,%d)\n", base_id, count);
4663 + DUMP(alloc);
4664 + BUG_ON(!count);
4665 + spin_lock_irq(&alloc->lock);
4666 +
4667 +
4668 + node->base = base_id;
4669 + node->num = count;
4670 + list_for_each_entry(i, &alloc->free, list) {
4671 + if (i->base >= node->base) {
4672 + /* BUG_ON(any overlapping) */
4673 + BUG_ON(i->base < (node->base + node->num));
4674 + list_add_tail(&node->list, &i->list);
4675 + goto done;
4676 + }
4677 + }
4678 + list_add_tail(&node->list, &alloc->free);
4679 +done:
4680 + /* Merge to the left */
4681 + i = list_entry(node->list.prev, struct alloc_node, list);
4682 + if (node->list.prev != &alloc->free) {
4683 + BUG_ON((i->base + i->num) > node->base);
4684 + if ((i->base + i->num) == node->base) {
4685 + node->base = i->base;
4686 + node->num += i->num;
4687 + list_del(&i->list);
4688 + kfree(i);
4689 + }
4690 + }
4691 + /* Merge to the right */
4692 + i = list_entry(node->list.next, struct alloc_node, list);
4693 + if (node->list.next != &alloc->free) {
4694 + BUG_ON((node->base + node->num) > i->base);
4695 + if ((node->base + node->num) == i->base) {
4696 + node->num += i->num;
4697 + list_del(&i->list);
4698 + kfree(i);
4699 + }
4700 + }
4701 + spin_unlock_irq(&alloc->lock);
4702 + DUMP(alloc);
4703 +}
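
_dpa_alloc_free() keeps the free list sorted by base and coalesces the freed range with either neighbour when they touch. Below is a minimal sketch of the two merge checks, not part of this patch, using plain structs in place of list_head; the asserts mirror the overlap BUG_ONs above.

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

struct range { uint32_t base, num; };

/* merge 'node' with an immediately-adjacent left and/or right neighbour */
static struct range merge(struct range left, struct range node,
			  struct range right)
{
	assert(left.base + left.num <= node.base);  /* no overlap on the left */
	assert(node.base + node.num <= right.base); /* no overlap on the right */

	if (left.base + left.num == node.base) {    /* merge to the left */
		node.base = left.base;
		node.num += left.num;
	}
	if (node.base + node.num == right.base)     /* merge to the right */
		node.num += right.num;
	return node;
}

int main(void)
{
	/* freeing [20..29] between existing free runs [10..19] and [30..39] */
	struct range merged = merge((struct range){ 10, 10 },
				    (struct range){ 20, 10 },
				    (struct range){ 30, 10 });

	printf("coalesced free range: [%u..%u]\n",
	       merged.base, merged.base + merged.num - 1);
	return 0;
}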
4704 +
4705 +
4706 +void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
4707 +{
4708 + struct alloc_node *i = NULL;
4709 + spin_lock_irq(&alloc->lock);
4710 +
4711 + /* First find the node in the used list and decrement its ref count */
4712 + list_for_each_entry(i, &alloc->used, list) {
4713 + if (i->base == base_id && i->num == count) {
4714 + --i->refcount;
4715 + if (i->refcount == 0) {
4716 + list_del(&i->list);
4717 + spin_unlock_irq(&alloc->lock);
4718 + if (i->is_alloced)
4719 + _dpa_alloc_free(alloc, base_id, count);
4720 + kfree(i);
4721 + return;
4722 + }
4723 + spin_unlock_irq(&alloc->lock);
4724 + return;
4725 + }
4726 + }
4727 + /* Couldn't find the allocation */
4728 + pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
4729 + base_id, count);
4730 + spin_unlock_irq(&alloc->lock);
4731 +}
4732 +
4733 +void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count)
4734 +{
4735 + /* Same as free but no previous allocation checking is needed */
4736 + _dpa_alloc_free(alloc, base_id, count);
4737 +}
4738 +
4739 +
4740 +int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
4741 +{
4742 + struct alloc_node *i = NULL, *used_node;
4743 +
4744 + DPRINT("alloc_reserve(%d,%d)\n", base, num);
4745 + DUMP(alloc);
4746 +
4747 + spin_lock_irq(&alloc->lock);
4748 +
4749 + /* Check for the node in the used list.
4750 +	   If found, increase its refcount */
4751 + list_for_each_entry(i, &alloc->used, list) {
4752 + if ((i->base == base) && (i->num == num)) {
4753 + ++i->refcount;
4754 + spin_unlock_irq(&alloc->lock);
4755 + return 0;
4756 + }
4757 + if ((base >= i->base) && (base < (i->base + i->num))) {
4758 + /* This is an attempt to reserve a region that was
4759 + already reserved or alloced with a different
4760 + base or num */
4761 + pr_err("Cannot reserve %d - %d, it overlaps with"
4762 + " existing reservation from %d - %d\n",
4763 + base, base + num - 1, i->base,
4764 + i->base + i->num - 1);
4765 + spin_unlock_irq(&alloc->lock);
4766 + return -1;
4767 + }
4768 + }
4769 + /* Check to make sure this ID isn't in the free list */
4770 + list_for_each_entry(i, &alloc->free, list) {
4771 + if ((base >= i->base) && (base < (i->base + i->num))) {
4772 + /* yep, the reservation is within this node */
4773 + pr_err("Cannot reserve %d - %d, it overlaps with"
4774 + " free range %d - %d and must be alloced\n",
4775 + base, base + num - 1,
4776 + i->base, i->base + i->num - 1);
4777 + spin_unlock_irq(&alloc->lock);
4778 + return -1;
4779 + }
4780 + }
4781 + /* Add the allocation to the used list with a refcount of 1 */
4782 + used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
4783 + if (!used_node) {
4784 + spin_unlock_irq(&alloc->lock);
4785 + return -ENOMEM;
4786 +
4787 + }
4788 + used_node->base = base;
4789 + used_node->num = num;
4790 + used_node->refcount = 1;
4791 + used_node->is_alloced = 0;
4792 + list_add_tail(&used_node->list, &alloc->used);
4793 + spin_unlock_irq(&alloc->lock);
4794 + return 0;
4795 +}
4796 +
4797 +
4798 +int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
4799 +{
4800 + struct alloc_node *i = NULL;
4801 + DPRINT("alloc_pop()\n");
4802 + DUMP(alloc);
4803 + spin_lock_irq(&alloc->lock);
4804 + if (!list_empty(&alloc->free)) {
4805 + i = list_entry(alloc->free.next, struct alloc_node, list);
4806 + list_del(&i->list);
4807 + }
4808 + spin_unlock_irq(&alloc->lock);
4809 + DPRINT("returning %d\n", i ? 0 : -ENOMEM);
4810 + DUMP(alloc);
4811 + if (!i)
4812 + return -ENOMEM;
4813 + *result = i->base;
4814 + *count = i->num;
4815 + kfree(i);
4816 + return 0;
4817 +}
4818 +
4819 +int dpa_alloc_check(struct dpa_alloc *list_head, u32 item)
4820 +{
4821 + struct alloc_node *i = NULL;
4822 + int res = 0;
4823 + DPRINT("alloc_check()\n");
4824 + spin_lock_irq(&list_head->lock);
4825 +
4826 + list_for_each_entry(i, &list_head->free, list) {
4827 + if ((item >= i->base) && (item < (i->base + i->num))) {
4828 + res = 1;
4829 + break;
4830 + }
4831 + }
4832 + spin_unlock_irq(&list_head->lock);
4833 + return res;
4834 +}
4835 --- /dev/null
4836 +++ b/drivers/staging/fsl_qbman/dpa_sys.h
4837 @@ -0,0 +1,259 @@
4838 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
4839 + *
4840 + * Redistribution and use in source and binary forms, with or without
4841 + * modification, are permitted provided that the following conditions are met:
4842 + * * Redistributions of source code must retain the above copyright
4843 + * notice, this list of conditions and the following disclaimer.
4844 + * * Redistributions in binary form must reproduce the above copyright
4845 + * notice, this list of conditions and the following disclaimer in the
4846 + * documentation and/or other materials provided with the distribution.
4847 + * * Neither the name of Freescale Semiconductor nor the
4848 + * names of its contributors may be used to endorse or promote products
4849 + * derived from this software without specific prior written permission.
4850 + *
4851 + *
4852 + * ALTERNATIVELY, this software may be distributed under the terms of the
4853 + * GNU General Public License ("GPL") as published by the Free Software
4854 + * Foundation, either version 2 of that License or (at your option) any
4855 + * later version.
4856 + *
4857 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
4858 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
4859 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
4860 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
4861 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
4862 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
4863 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4864 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4865 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4866 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4867 + */
4868 +
4869 +#ifndef DPA_SYS_H
4870 +#define DPA_SYS_H
4871 +
4872 +#include <linux/kernel.h>
4873 +#include <linux/errno.h>
4874 +#include <linux/io.h>
4875 +#include <linux/dma-mapping.h>
4876 +#include <linux/bootmem.h>
4877 +#include <linux/slab.h>
4878 +#include <linux/module.h>
4879 +#include <linux/init.h>
4880 +#include <linux/interrupt.h>
4881 +#include <linux/delay.h>
4882 +#include <linux/of_platform.h>
4883 +#include <linux/of_address.h>
4884 +#include <linux/of_irq.h>
4885 +#include <linux/kthread.h>
4886 +#include <linux/memblock.h>
4887 +#include <linux/completion.h>
4888 +#include <linux/log2.h>
4889 +#include <linux/types.h>
4890 +#include <linux/ioctl.h>
4891 +#include <linux/miscdevice.h>
4892 +#include <linux/uaccess.h>
4893 +#include <linux/debugfs.h>
4894 +#include <linux/seq_file.h>
4895 +#include <linux/device.h>
4896 +#include <linux/uio_driver.h>
4897 +#include <linux/smp.h>
4898 +#include <linux/fsl_hypervisor.h>
4899 +#include <linux/vmalloc.h>
4900 +#include <linux/ctype.h>
4901 +#include <linux/math64.h>
4902 +#include <linux/bitops.h>
4903 +
4904 +#include <linux/fsl_usdpaa.h>
4905 +
4906 +/* When copying aligned words or shorts, try to avoid memcpy() */
4907 +#define CONFIG_TRY_BETTER_MEMCPY
4908 +
4909 +/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
4910 +#define DPA_PORTAL_CE 0
4911 +#define DPA_PORTAL_CI 1
4912 +
4913 +/***********************/
4914 +/* Misc inline assists */
4915 +/***********************/
4916 +
4917 +#if defined CONFIG_PPC32
4918 +#include "dpa_sys_ppc32.h"
4919 +#elif defined CONFIG_PPC64
4920 +#include "dpa_sys_ppc64.h"
4921 +#elif defined CONFIG_ARM
4922 +#include "dpa_sys_arm.h"
4923 +#elif defined CONFIG_ARM64
4924 +#include "dpa_sys_arm64.h"
4925 +#endif
4926 +
4927 +
4928 +#ifdef CONFIG_FSL_DPA_CHECKING
4929 +#define DPA_ASSERT(x) \
4930 + do { \
4931 + if (!(x)) { \
4932 + pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \
4933 + __stringify_1(x)); \
4934 + dump_stack(); \
4935 + panic("assertion failure"); \
4936 + } \
4937 + } while (0)
4938 +#else
4939 +#define DPA_ASSERT(x)
4940 +#endif
4941 +
4942 +/* memcpy() stuff - when you know alignments in advance */
4943 +#ifdef CONFIG_TRY_BETTER_MEMCPY
4944 +static inline void copy_words(void *dest, const void *src, size_t sz)
4945 +{
4946 + u32 *__dest = dest;
4947 + const u32 *__src = src;
4948 + size_t __sz = sz >> 2;
4949 + BUG_ON((unsigned long)dest & 0x3);
4950 + BUG_ON((unsigned long)src & 0x3);
4951 + BUG_ON(sz & 0x3);
4952 + while (__sz--)
4953 + *(__dest++) = *(__src++);
4954 +}
4955 +static inline void copy_shorts(void *dest, const void *src, size_t sz)
4956 +{
4957 + u16 *__dest = dest;
4958 + const u16 *__src = src;
4959 + size_t __sz = sz >> 1;
4960 + BUG_ON((unsigned long)dest & 0x1);
4961 + BUG_ON((unsigned long)src & 0x1);
4962 + BUG_ON(sz & 0x1);
4963 + while (__sz--)
4964 + *(__dest++) = *(__src++);
4965 +}
4966 +static inline void copy_bytes(void *dest, const void *src, size_t sz)
4967 +{
4968 + u8 *__dest = dest;
4969 + const u8 *__src = src;
4970 + while (sz--)
4971 + *(__dest++) = *(__src++);
4972 +}
4973 +#else
4974 +#define copy_words memcpy
4975 +#define copy_shorts memcpy
4976 +#define copy_bytes memcpy
4977 +#endif
4978 +
4979 +/************/
4980 +/* RB-trees */
4981 +/************/
4982 +
4983 +/* We encapsulate RB-trees so that it's easier to use non-linux forms in
4984 + * non-linux systems. This also encapsulates the extra plumbing that linux code
4985 + * usually provides when using RB-trees. This encapsulation assumes that the
4986 + * data type held by the tree is u32. */
4987 +
4988 +struct dpa_rbtree {
4989 + struct rb_root root;
4990 +};
4991 +#define DPA_RBTREE { .root = RB_ROOT }
4992 +
4993 +static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
4994 +{
4995 + tree->root = RB_ROOT;
4996 +}
4997 +
4998 +#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
4999 +static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
5000 +{ \
5001 + struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
5002 + while (*p) { \
5003 + u32 item; \
5004 + parent = *p; \
5005 + item = rb_entry(parent, type, node_field)->val_field; \
5006 + if (obj->val_field < item) \
5007 + p = &parent->rb_left; \
5008 + else if (obj->val_field > item) \
5009 + p = &parent->rb_right; \
5010 + else \
5011 + return -EBUSY; \
5012 + } \
5013 + rb_link_node(&obj->node_field, parent, p); \
5014 + rb_insert_color(&obj->node_field, &tree->root); \
5015 + return 0; \
5016 +} \
5017 +static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
5018 +{ \
5019 + rb_erase(&obj->node_field, &tree->root); \
5020 +} \
5021 +static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
5022 +{ \
5023 + type *ret; \
5024 + struct rb_node *p = tree->root.rb_node; \
5025 + while (p) { \
5026 + ret = rb_entry(p, type, node_field); \
5027 + if (val < ret->val_field) \
5028 + p = p->rb_left; \
5029 + else if (val > ret->val_field) \
5030 + p = p->rb_right; \
5031 + else \
5032 + return ret; \
5033 + } \
5034 + return NULL; \
5035 +}
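+/* Illustrative usage (the names below are hypothetical, for example only):
+ *     struct my_fq { struct rb_node node; u32 fqid; };
+ *     IMPLEMENT_DPA_RBTREE(fqtree, struct my_fq, node, fqid);
+ * expands to fqtree_push(), fqtree_del() and fqtree_find(), all keyed on the
+ * u32 'fqid' field, with duplicate keys rejected via -EBUSY. */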
5036 +
5037 +/************/
5038 +/* Bootargs */
5039 +/************/
5040 +
5041 +/* Qman has "qportals=" and Bman has "bportals=", they use the same syntax
5042 + * though; a comma-separated list of items, each item being a cpu index and/or a
5043 + * range of cpu indices, and each item may optionally be prefixed by "s" to indicate
5044 + * that the portal associated with that cpu should be shared. See bman_driver.c
5045 + * for more specifics. */
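+/* For example, "bportals=s0,2-3" requests a shared portal for cpu 0 and
+ * unshared portals for cpus 2 and 3; "qportals=" takes the same form. */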
5046 +static int __parse_portals_cpu(const char **s, unsigned int *cpu)
5047 +{
5048 + *cpu = 0;
5049 + if (!isdigit(**s))
5050 + return -EINVAL;
5051 + while (isdigit(**s))
5052 + *cpu = *cpu * 10 + (*((*s)++) - '0');
5053 + return 0;
5054 +}
5055 +static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
5056 + struct cpumask *want_unshared,
5057 + const char *argname)
5058 +{
5059 + const char *s = str;
5060 + unsigned int shared, cpu1, cpu2, loop;
5061 +
5062 +keep_going:
5063 + if (*s == 's') {
5064 + shared = 1;
5065 + s++;
5066 + } else
5067 + shared = 0;
5068 + if (__parse_portals_cpu(&s, &cpu1))
5069 + goto err;
5070 + if (*s == '-') {
5071 + s++;
5072 + if (__parse_portals_cpu(&s, &cpu2))
5073 + goto err;
5074 + if (cpu2 < cpu1)
5075 + goto err;
5076 + } else
5077 + cpu2 = cpu1;
5078 + for (loop = cpu1; loop <= cpu2; loop++)
5079 + cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
5080 + if (*s == ',') {
5081 + s++;
5082 + goto keep_going;
5083 + } else if ((*s == '\0') || isspace(*s))
5084 + return 0;
5085 +err:
5086 + pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
5087 + (unsigned long)s - (unsigned long)str);
5088 + return -EINVAL;
5089 +}
5090 +#ifdef CONFIG_FSL_USDPAA
5091 +/* Hooks from fsl_usdpaa_irq.c to fsl_usdpaa.c */
5092 +int usdpaa_get_portal_config(struct file *filp, void *cinh,
5093 + enum usdpaa_portal_type ptype, unsigned int *irq,
5094 + void **iir_reg);
5095 +#endif
5096 +#endif /* DPA_SYS_H */
5097 --- /dev/null
5098 +++ b/drivers/staging/fsl_qbman/dpa_sys_arm.h
5099 @@ -0,0 +1,95 @@
5100 +/* Copyright 2016 Freescale Semiconductor, Inc.
5101 + *
5102 + * Redistribution and use in source and binary forms, with or without
5103 + * modification, are permitted provided that the following conditions are met:
5104 + * * Redistributions of source code must retain the above copyright
5105 + * notice, this list of conditions and the following disclaimer.
5106 + * * Redistributions in binary form must reproduce the above copyright
5107 + * notice, this list of conditions and the following disclaimer in the
5108 + * documentation and/or other materials provided with the distribution.
5109 + * * Neither the name of Freescale Semiconductor nor the
5110 + * names of its contributors may be used to endorse or promote products
5111 + * derived from this software without specific prior written permission.
5112 + *
5113 + *
5114 + * ALTERNATIVELY, this software may be distributed under the terms of the
5115 + * GNU General Public License ("GPL") as published by the Free Software
5116 + * Foundation, either version 2 of that License or (at your option) any
5117 + * later version.
5118 + *
5119 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5120 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5121 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5122 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5123 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5124 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5125 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5126 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5127 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5128 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5129 + */
5130 +
5131 +#ifndef DPA_SYS_ARM_H
5132 +#define DPA_SYS_ARM_H
5133 +
5134 +#include <asm/cacheflush.h>
5135 +#include <asm/barrier.h>
5136 +
5137 +/* Implementation of ARM specific routines */
5138 +
5139 +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
5140 + * barriers and that dcb*() won't fall victim to compiler or execution
5141 + * reordering with respect to other code/instructions that manipulate the same
5142 + * cacheline. */
5143 +#define hwsync() { asm volatile("dmb st" : : : "memory"); }
5144 +#define lwsync() { asm volatile("dmb st" : : : "memory"); }
5145 +#define dcbf(p) { asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p) : "memory"); }
5146 +#define dcbt_ro(p) { asm volatile("pld [%0, #64];": : "r" (p)); }
5147 +#define dcbt_rw(p) { asm volatile("pldw [%0, #64];": : "r" (p)); }
5148 +#define dcbi(p) { asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (p) : "memory"); }
5149 +
5150 +#define dcbz_64(p) { memset(p, 0, sizeof(*p)); }
5151 +
5152 +#define dcbf_64(p) \
5153 + do { \
5154 + dcbf((u32)p); \
5155 + } while (0)
5156 +/* Commonly used combo */
5157 +#define dcbit_ro(p) \
5158 + do { \
5159 + dcbi((u32)p); \
5160 + dcbt_ro((u32)p); \
5161 + } while (0)
5162 +
5163 +static inline u64 mfatb(void)
5164 +{
5165 + return get_cycles();
5166 +}
5167 +
5168 +static inline u32 in_be32(volatile void *addr)
5169 +{
5170 + return be32_to_cpu(*((volatile u32 *) addr));
5171 +}
5172 +
5173 +static inline void out_be32(void *addr, u32 val)
5174 +{
5175 + *((u32 *) addr) = cpu_to_be32(val);
5176 +}
5177 +
5178 +
5179 +static inline void set_bits(unsigned long mask, volatile unsigned long *p)
5180 +{
5181 + *p |= mask;
5182 +}
5183 +static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
5184 +{
5185 + *p &= ~mask;
5186 +}
5187 +
5188 +static inline void flush_dcache_range(unsigned long start, unsigned long stop)
5189 +{
5190 + __cpuc_flush_dcache_area((void *) start, stop - start);
5191 +}
5192 +
5193 +#define hard_smp_processor_id() raw_smp_processor_id()
5194 +#endif
5195 --- /dev/null
5196 +++ b/drivers/staging/fsl_qbman/dpa_sys_arm64.h
5197 @@ -0,0 +1,102 @@
5198 +/* Copyright 2014 Freescale Semiconductor, Inc.
5199 + *
5200 + * Redistribution and use in source and binary forms, with or without
5201 + * modification, are permitted provided that the following conditions are met:
5202 + * * Redistributions of source code must retain the above copyright
5203 + * notice, this list of conditions and the following disclaimer.
5204 + * * Redistributions in binary form must reproduce the above copyright
5205 + * notice, this list of conditions and the following disclaimer in the
5206 + * documentation and/or other materials provided with the distribution.
5207 + * * Neither the name of Freescale Semiconductor nor the
5208 + * names of its contributors may be used to endorse or promote products
5209 + * derived from this software without specific prior written permission.
5210 + *
5211 + *
5212 + * ALTERNATIVELY, this software may be distributed under the terms of the
5213 + * GNU General Public License ("GPL") as published by the Free Software
5214 + * Foundation, either version 2 of that License or (at your option) any
5215 + * later version.
5216 + *
5217 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5218 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5219 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5220 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5221 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5222 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5223 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5224 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5225 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5226 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5227 + */
5228 +
5229 +#ifndef DPA_SYS_ARM64_H
5230 +#define DPA_SYS_ARM64_H
5231 +
5232 +#include <asm/cacheflush.h>
5233 +#include <asm/barrier.h>
5234 +
5235 +/* Implementation of ARM 64 bit specific routines */
5236 +
5237 +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
5238 + * barriers and that dcb*() won't fall victim to compiler or execution
5239 + * reordering with respect to other code/instructions that manipulate the same
5240 + * cacheline. */
5241 +#define hwsync() { asm volatile("dmb st" : : : "memory"); }
5242 +#define lwsync() { asm volatile("dmb st" : : : "memory"); }
5243 +#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
5244 +#define dcbt_ro(p) { asm volatile("prfm pldl1keep, [%0, #0]" : : "r" (p)); }
5245 +#define dcbt_rw(p) { asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p)); }
5246 +#define dcbi(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
5247 +#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
5248 +
5249 +#define dcbz_64(p) \
5250 + do { \
5251 + dcbz(p); \
5252 + } while (0)
5253 +
5254 +#define dcbf_64(p) \
5255 + do { \
5256 + dcbf(p); \
5257 + } while (0)
5258 +/* Commonly used combo */
5259 +#define dcbit_ro(p) \
5260 + do { \
5261 + dcbi(p); \
5262 + dcbt_ro(p); \
5263 + } while (0)
5264 +
5265 +static inline u64 mfatb(void)
5266 +{
5267 + return get_cycles();
5268 +}
5269 +
5270 +static inline u32 in_be32(volatile void *addr)
5271 +{
5272 + return be32_to_cpu(*((volatile u32 *) addr));
5273 +}
5274 +
5275 +static inline void out_be32(void *addr, u32 val)
5276 +{
5277 + *((u32 *) addr) = cpu_to_be32(val);
5278 +}
5279 +
5280 +
5281 +static inline void set_bits(unsigned long mask, volatile unsigned long *p)
5282 +{
5283 + *p |= mask;
5284 +}
5285 +static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
5286 +{
5287 + *p &= ~mask;
5288 +}
5289 +
5290 +static inline void flush_dcache_range(unsigned long start, unsigned long stop)
5291 +{
5292 + __flush_dcache_area((void *) start, stop - start);
5293 +}
5294 +
5295 +#define hard_smp_processor_id() raw_smp_processor_id()
5296 +
5297 +
5298 +
5299 +#endif
5300 --- /dev/null
5301 +++ b/drivers/staging/fsl_qbman/dpa_sys_ppc32.h
5302 @@ -0,0 +1,70 @@
5303 +/* Copyright 2014 Freescale Semiconductor, Inc.
5304 + *
5305 + * Redistribution and use in source and binary forms, with or without
5306 + * modification, are permitted provided that the following conditions are met:
5307 + * * Redistributions of source code must retain the above copyright
5308 + * notice, this list of conditions and the following disclaimer.
5309 + * * Redistributions in binary form must reproduce the above copyright
5310 + * notice, this list of conditions and the following disclaimer in the
5311 + * documentation and/or other materials provided with the distribution.
5312 + * * Neither the name of Freescale Semiconductor nor the
5313 + * names of its contributors may be used to endorse or promote products
5314 + * derived from this software without specific prior written permission.
5315 + *
5316 + *
5317 + * ALTERNATIVELY, this software may be distributed under the terms of the
5318 + * GNU General Public License ("GPL") as published by the Free Software
5319 + * Foundation, either version 2 of that License or (at your option) any
5320 + * later version.
5321 + *
5322 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5323 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5324 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5325 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5326 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5327 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5328 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5329 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5330 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5331 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5332 + */
5333 +
5334 +#ifndef DPA_SYS_PPC32_H
5335 +#define DPA_SYS_PPC32_H
5336 +
5337 +/* Implementation of PowerPC 32 bit specific routines */
5338 +
5339 +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
5340 + * barriers and that dcb*() won't fall victim to compiler or execution
5341 + * reordering with respect to other code/instructions that manipulate the same
5342 + * cacheline. */
5343 +#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
5344 +#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
5345 +#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
5346 +#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
5347 +#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
5348 +#define dcbi(p) dcbf(p)
5349 +
5350 +#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
5351 +#define dcbz_64(p) dcbzl(p)
5352 +#define dcbf_64(p) dcbf(p)
5353 +
5354 +/* Commonly used combo */
5355 +#define dcbit_ro(p) \
5356 + do { \
5357 + dcbi(p); \
5358 + dcbt_ro(p); \
5359 + } while (0)
5360 +
5361 +static inline u64 mfatb(void)
5362 +{
5363 + u32 hi, lo, chk;
5364 + do {
5365 + hi = mfspr(SPRN_ATBU);
5366 + lo = mfspr(SPRN_ATBL);
5367 + chk = mfspr(SPRN_ATBU);
5368 + } while (unlikely(hi != chk));
5369 + return ((u64)hi << 32) | (u64)lo;
5370 +}
5371 +
5372 +#endif
5373 --- /dev/null
5374 +++ b/drivers/staging/fsl_qbman/dpa_sys_ppc64.h
5375 @@ -0,0 +1,79 @@
5376 +/* Copyright 2014 Freescale Semiconductor, Inc.
5377 + *
5378 + * Redistribution and use in source and binary forms, with or without
5379 + * modification, are permitted provided that the following conditions are met:
5380 + * * Redistributions of source code must retain the above copyright
5381 + * notice, this list of conditions and the following disclaimer.
5382 + * * Redistributions in binary form must reproduce the above copyright
5383 + * notice, this list of conditions and the following disclaimer in the
5384 + * documentation and/or other materials provided with the distribution.
5385 + * * Neither the name of Freescale Semiconductor nor the
5386 + * names of its contributors may be used to endorse or promote products
5387 + * derived from this software without specific prior written permission.
5388 + *
5389 + *
5390 + * ALTERNATIVELY, this software may be distributed under the terms of the
5391 + * GNU General Public License ("GPL") as published by the Free Software
5392 + * Foundation, either version 2 of that License or (at your option) any
5393 + * later version.
5394 + *
5395 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5396 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5397 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5398 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5399 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5400 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5401 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5402 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5403 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5404 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5405 + */
5406 +
5407 +#ifndef DPA_SYS_PPC64_H
5408 +#define DPA_SYS_PPC64_H
5409 +
5410 +/* Implementation of PowerPC 64 bit specific routines */
5411 +
5412 +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
5413 + * barriers and that dcb*() won't fall victim to compiler or execution
5414 + * reordering with respect to other code/instructions that manipulate the same
5415 + * cacheline. */
5416 +#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
5417 +#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
5418 +#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
5419 +#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
5420 +#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
5421 +#define dcbi(p) dcbf(p)
5422 +
5423 +#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
5424 +#define dcbz_64(p) \
5425 + do { \
5426 + dcbz((void*)p + 32); \
5427 + dcbz(p); \
5428 + } while (0)
5429 +#define dcbf_64(p) \
5430 + do { \
5431 + dcbf((void*)p + 32); \
5432 + dcbf(p); \
5433 + } while (0)
5434 +/* Commonly used combo */
5435 +#define dcbit_ro(p) \
5436 + do { \
5437 + dcbi(p); \
5438 + dcbi((void*)p + 32); \
5439 + dcbt_ro(p); \
5440 + dcbt_ro((void*)p + 32); \
5441 + } while (0)
5442 +
5443 +static inline u64 mfatb(void)
5444 +{
5445 + u32 hi, lo, chk;
5446 + do {
5447 + hi = mfspr(SPRN_ATBU);
5448 + lo = mfspr(SPRN_ATBL);
5449 + chk = mfspr(SPRN_ATBU);
5450 + } while (unlikely(hi != chk));
5451 + return ((u64)hi << 32) | (u64)lo;
5452 +}
5453 +
5454 +#endif
5455 --- /dev/null
5456 +++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
5457 @@ -0,0 +1,1984 @@
5458 +/* Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
5459 + * Authors: Andy Fleming <afleming@freescale.com>
5460 + * Timur Tabi <timur@freescale.com>
5461 + * Geoff Thorpe <Geoff.Thorpe@freescale.com>
5462 + *
5463 + * This file is licensed under the terms of the GNU General Public License
5464 + * version 2. This program is licensed "as is" without any warranty of any
5465 + * kind, whether express or implied.
5466 + */
5467 +
5468 +
5469 +#include <linux/miscdevice.h>
5470 +#include <linux/fs.h>
5471 +#include <linux/cdev.h>
5472 +#include <linux/mm.h>
5473 +#include <linux/of.h>
5474 +#include <linux/memblock.h>
5475 +#include <linux/slab.h>
5476 +#include <linux/mman.h>
5477 +#include <linux/of_reserved_mem.h>
5478 +
5479 +#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
5480 +#include <mm/mmu_decl.h>
5481 +#endif
5482 +
5483 +#include "dpa_sys.h"
5484 +#include <linux/fsl_usdpaa.h>
5485 +#include "bman_low.h"
5486 +#include "qman_low.h"
5487 +
5488 +/* Physical address range of the memory reservation, exported for mm/mem.c */
5489 +static u64 phys_start;
5490 +static u64 phys_size;
5491 +static u64 arg_phys_size;
5492 +
5493 +/* PFN versions of the above */
5494 +static unsigned long pfn_start;
5495 +static unsigned long pfn_size;
5496 +
5497 +/* Memory reservations are manipulated under this spinlock (which is why 'refs'
5498 + * isn't atomic_t). */
5499 +static DEFINE_SPINLOCK(mem_lock);
5500 +
5501 +/* The range of TLB1 indices */
5502 +static unsigned int first_tlb;
5503 +static unsigned int num_tlb = 1;
5504 +static unsigned int current_tlb; /* loops around for fault handling */
5505 +
5506 +/* Memory reservation is represented as a list of 'mem_fragment's, some of which
5507 + * may be mapped. Unmapped fragments are always merged where possible. */
5508 +static LIST_HEAD(mem_list);
5509 +
5510 +struct mem_mapping;
5511 +
5512 +/* Memory fragments are in 'mem_list'. */
5513 +struct mem_fragment {
5514 + u64 base;
5515 + u64 len;
5516 + unsigned long pfn_base; /* PFN version of 'base' */
5517 + unsigned long pfn_len; /* PFN version of 'len' */
5518 + unsigned int refs; /* zero if unmapped */
5519 +	u64 root_len; /* Size of the original fragment */
5520 +	unsigned long root_pfn; /* PFN of the original fragment */
5521 + struct list_head list;
5522 + /* if mapped, flags+name captured at creation time */
5523 + u32 flags;
5524 + char name[USDPAA_DMA_NAME_MAX];
5525 + u64 map_len;
5526 + /* support multi-process locks per-memory-fragment. */
5527 + int has_locking;
5528 + wait_queue_head_t wq;
5529 + struct mem_mapping *owner;
5530 +};
5531 +
5532 +/* Mappings of memory fragments in 'struct ctx'. These are created from
5533 + * ioctl(USDPAA_IOCTL_DMA_MAP), though the actual mapping then happens via a
5534 + * mmap(). */
5535 +struct mem_mapping {
5536 + struct mem_fragment *root_frag;
5537 + u32 frag_count;
5538 + u64 total_size;
5539 + struct list_head list;
5540 + int refs;
5541 + void *virt_addr;
5542 +};
5543 +
5544 +struct portal_mapping {
5545 + struct usdpaa_ioctl_portal_map user;
5546 + union {
5547 + struct qm_portal_config *qportal;
5548 + struct bm_portal_config *bportal;
5549 + };
5550 + /* Declare space for the portals in case the process
5551 + exits unexpectedly and needs to be cleaned by the kernel */
5552 + union {
5553 + struct qm_portal qman_portal_low;
5554 + struct bm_portal bman_portal_low;
5555 + };
5556 + struct list_head list;
5557 + struct resource *phys;
5558 + struct iommu_domain *iommu_domain;
5559 +};
5560 +
5561 +/* Track the DPAA resources the process is using */
5562 +struct active_resource {
5563 + struct list_head list;
5564 + u32 id;
5565 + u32 num;
5566 + unsigned int refcount;
5567 +};
5568 +
5569 +/* Per-FD state (which should also be per-process but we don't enforce that) */
5570 +struct ctx {
5571 + /* Lock to protect the context */
5572 + spinlock_t lock;
5573 + /* Allocated resources get put here for accounting */
5574 + struct list_head resources[usdpaa_id_max];
5575 + /* list of DMA maps */
5576 + struct list_head maps;
5577 + /* list of portal maps */
5578 + struct list_head portals;
5579 +};
5580 +
5581 +/* Different resource classes */
5582 +static const struct alloc_backend {
5583 + enum usdpaa_id_type id_type;
5584 + int (*alloc)(u32 *, u32, u32, int);
5585 + void (*release)(u32 base, unsigned int count);
5586 + int (*reserve)(u32 base, unsigned int count);
5587 + const char *acronym;
5588 +} alloc_backends[] = {
5589 + {
5590 + .id_type = usdpaa_id_fqid,
5591 + .alloc = qman_alloc_fqid_range,
5592 + .release = qman_release_fqid_range,
5593 + .reserve = qman_reserve_fqid_range,
5594 + .acronym = "FQID"
5595 + },
5596 + {
5597 + .id_type = usdpaa_id_bpid,
5598 + .alloc = bman_alloc_bpid_range,
5599 + .release = bman_release_bpid_range,
5600 + .reserve = bman_reserve_bpid_range,
5601 + .acronym = "BPID"
5602 + },
5603 + {
5604 + .id_type = usdpaa_id_qpool,
5605 + .alloc = qman_alloc_pool_range,
5606 + .release = qman_release_pool_range,
5607 + .reserve = qman_reserve_pool_range,
5608 + .acronym = "QPOOL"
5609 + },
5610 + {
5611 + .id_type = usdpaa_id_cgrid,
5612 + .alloc = qman_alloc_cgrid_range,
5613 + .release = qman_release_cgrid_range,
5614 + .acronym = "CGRID"
5615 + },
5616 + {
5617 + .id_type = usdpaa_id_ceetm0_lfqid,
5618 + .alloc = qman_alloc_ceetm0_lfqid_range,
5619 + .release = qman_release_ceetm0_lfqid_range,
5620 + .acronym = "CEETM0_LFQID"
5621 + },
5622 + {
5623 + .id_type = usdpaa_id_ceetm0_channelid,
5624 + .alloc = qman_alloc_ceetm0_channel_range,
5625 + .release = qman_release_ceetm0_channel_range,
5626 +		.acronym = "CEETM0_CHANNELID"
5627 + },
5628 + {
5629 + .id_type = usdpaa_id_ceetm1_lfqid,
5630 + .alloc = qman_alloc_ceetm1_lfqid_range,
5631 + .release = qman_release_ceetm1_lfqid_range,
5632 + .acronym = "CEETM1_LFQID"
5633 + },
5634 + {
5635 + .id_type = usdpaa_id_ceetm1_channelid,
5636 + .alloc = qman_alloc_ceetm1_channel_range,
5637 + .release = qman_release_ceetm1_channel_range,
5638 +		.acronym = "CEETM1_CHANNELID"
5639 + },
5640 + {
5641 + /* This terminates the array */
5642 + .id_type = usdpaa_id_max
5643 + }
5644 +};
5645 +
5646 +/* Determines the largest acceptable page size for a given size.
5647 +   The candidates are limited to the page sizes that TLB1 accepts */
5648 +static u32 largest_page_size(u32 size)
5649 +{
5650 + int shift = 30; /* Start at 1G size */
5651 + if (size < 4096)
5652 + return 0;
5653 + do {
5654 + if (size >= (1<<shift))
5655 + return 1<<shift;
5656 + shift -= 2;
5657 + } while (shift >= 12); /* Up to 4k */
5658 + return 0;
5659 +}
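+/* The candidate sizes step down in powers of 4 from 1G to 4K, so e.g. a 5MB
+ * request yields 4MB and anything below 4K yields 0. */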
5660 +
5661 +/* Determine if value is power of 4 */
5662 +static inline bool is_power_of_4(u64 x)
5663 +{
5664 + if (x == 0 || ((x & (x - 1)) != 0))
5665 + return false;
5666 + return !!(x & 0x5555555555555555ull);
5667 +}
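+/* e.g. 4K, 16K and 1G pass this test; 8K and 32K do not, since their single
+ * set bit sits at an odd position and is masked out by 0x5555555555555555. */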
5668 +
5669 +/* Helper for ioctl_dma_map() when we have a larger fragment than we need. This
5670 + * splits the fragment into 4 and returns the upper-most. (The caller can loop
5671 + * until it has a suitable fragment size.) */
5672 +static struct mem_fragment *split_frag(struct mem_fragment *frag)
5673 +{
5674 + struct mem_fragment *x[3];
5675 +
5676 + x[0] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
5677 + x[1] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
5678 + x[2] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
5679 + if (!x[0] || !x[1] || !x[2]) {
5680 + kfree(x[0]);
5681 + kfree(x[1]);
5682 + kfree(x[2]);
5683 + return NULL;
5684 + }
5685 + BUG_ON(frag->refs);
5686 + frag->len >>= 2;
5687 + frag->pfn_len >>= 2;
5688 + x[0]->base = frag->base + frag->len;
5689 + x[1]->base = x[0]->base + frag->len;
5690 + x[2]->base = x[1]->base + frag->len;
5691 + x[0]->len = x[1]->len = x[2]->len = frag->len;
5692 + x[0]->pfn_base = frag->pfn_base + frag->pfn_len;
5693 + x[1]->pfn_base = x[0]->pfn_base + frag->pfn_len;
5694 + x[2]->pfn_base = x[1]->pfn_base + frag->pfn_len;
5695 + x[0]->pfn_len = x[1]->pfn_len = x[2]->pfn_len = frag->pfn_len;
5696 + x[0]->refs = x[1]->refs = x[2]->refs = 0;
5697 + x[0]->root_len = x[1]->root_len = x[2]->root_len = frag->root_len;
5698 + x[0]->root_pfn = x[1]->root_pfn = x[2]->root_pfn = frag->root_pfn;
5699 + x[0]->name[0] = x[1]->name[0] = x[2]->name[0] = 0;
5700 + list_add_tail(&x[0]->list, &frag->list);
5701 + list_add_tail(&x[1]->list, &x[0]->list);
5702 + list_add_tail(&x[2]->list, &x[1]->list);
5703 + return x[2];
5704 +}
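+/* e.g. splitting a free 64K fragment shrinks the original node to the lowest
+ * 16K and creates three new 16K nodes covering the remainder; the caller gets
+ * the upper-most piece and may split again if it is still too large. */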
5705 +
5706 +static __maybe_unused void dump_frags(void)
5707 +{
5708 + struct mem_fragment *frag;
5709 + int i = 0;
5710 + list_for_each_entry(frag, &mem_list, list) {
5711 + pr_info("FRAG %d: base 0x%llx pfn_base 0x%lx len 0x%llx root_len 0x%llx root_pfn 0x%lx refs %d name %s\n",
5712 + i, frag->base, frag->pfn_base,
5713 + frag->len, frag->root_len, frag->root_pfn,
5714 + frag->refs, frag->name);
5715 + ++i;
5716 + }
5717 +}
5718 +
5719 +/* Walk the list of fragments and adjoin neighbouring segments if possible */
5720 +static void compress_frags(void)
5721 +{
5722 + /* Walk the fragment list and combine fragments */
5723 + struct mem_fragment *frag, *nxtfrag;
5724 + u64 len = 0;
5725 +
5726 + int i, numfrags;
5727 +
5728 +
5729 + frag = list_entry(mem_list.next, struct mem_fragment, list);
5730 +
5731 + while (&frag->list != &mem_list) {
5732 +		/* Must combine consecutive fragments with the
5733 +		   same root_pfn such that their total length is a power of 4 */
5734 + if (frag->refs != 0) {
5735 + frag = list_entry(frag->list.next,
5736 + struct mem_fragment, list);
5737 + continue; /* Not this window */
5738 + }
5739 + len = frag->len;
5740 + numfrags = 0;
5741 + nxtfrag = list_entry(frag->list.next,
5742 + struct mem_fragment, list);
5743 + while (true) {
5744 + if (&nxtfrag->list == &mem_list) {
5745 + numfrags = 0;
5746 + break; /* End of list */
5747 + }
5748 + if (nxtfrag->refs) {
5749 + numfrags = 0;
5750 + break; /* In use still */
5751 + }
5752 + if (nxtfrag->root_pfn != frag->root_pfn) {
5753 + numfrags = 0;
5754 + break; /* Crosses root fragment boundary */
5755 + }
5756 + len += nxtfrag->len;
5757 + numfrags++;
5758 + if (is_power_of_4(len)) {
5759 + /* These fragments can be combined */
5760 + break;
5761 + }
5762 + nxtfrag = list_entry(nxtfrag->list.next,
5763 + struct mem_fragment, list);
5764 + }
5765 + if (numfrags == 0) {
5766 + frag = list_entry(frag->list.next,
5767 + struct mem_fragment, list);
5768 + continue; /* try the next window */
5769 + }
5770 + for (i = 0; i < numfrags; i++) {
5771 + struct mem_fragment *todel =
5772 + list_entry(nxtfrag->list.prev,
5773 + struct mem_fragment, list);
5774 + nxtfrag->len += todel->len;
5775 + nxtfrag->pfn_len += todel->pfn_len;
5776 + list_del(&todel->list);
5777 + }
5778 +		/* Re-evaluate the list, things may merge now */
5779 + frag = list_entry(mem_list.next, struct mem_fragment, list);
5780 + }
5781 +}
5782 +
5783 +/* Hook from arch/powerpc/mm/mem.c */
5784 +int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size)
5785 +{
5786 + struct mem_fragment *frag;
5787 + int idx = -1;
5788 + if ((pfn < pfn_start) || (pfn >= (pfn_start + pfn_size)))
5789 + return -1;
5790 + /* It's in-range, we need to find the fragment */
5791 + spin_lock(&mem_lock);
5792 + list_for_each_entry(frag, &mem_list, list) {
5793 + if ((pfn >= frag->pfn_base) && (pfn < (frag->pfn_base +
5794 + frag->pfn_len))) {
5795 + *phys_addr = frag->base;
5796 + *size = frag->len;
5797 + idx = current_tlb++;
5798 + if (current_tlb >= (first_tlb + num_tlb))
5799 + current_tlb = first_tlb;
5800 + break;
5801 + }
5802 + }
5803 + spin_unlock(&mem_lock);
5804 + return idx;
5805 +}
5806 +
5807 +static int usdpaa_open(struct inode *inode, struct file *filp)
5808 +{
5809 + const struct alloc_backend *backend = &alloc_backends[0];
5810 + struct ctx *ctx = kmalloc(sizeof(struct ctx), GFP_KERNEL);
5811 + if (!ctx)
5812 + return -ENOMEM;
5813 + filp->private_data = ctx;
5814 +
5815 + while (backend->id_type != usdpaa_id_max) {
5816 + INIT_LIST_HEAD(&ctx->resources[backend->id_type]);
5817 + backend++;
5818 + }
5819 +
5820 + INIT_LIST_HEAD(&ctx->maps);
5821 + INIT_LIST_HEAD(&ctx->portals);
5822 + spin_lock_init(&ctx->lock);
5823 +
5824 + //filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi;
5825 +
5826 + return 0;
5827 +}
5828 +
5829 +#define DQRR_MAXFILL 15
5830 +
5831 +/* Reset a QMan portal to its default state */
5832 +static int init_qm_portal(struct qm_portal_config *config,
5833 + struct qm_portal *portal)
5834 +{
5835 + const struct qm_dqrr_entry *dqrr = NULL;
5836 + int i;
5837 +
5838 + portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
5839 + portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
5840 +
5841 + /* Make sure interrupts are inhibited */
5842 + qm_out(IIR, 1);
5843 +
5844 + /* Initialize the DQRR. This will stop any dequeue
5845 + commands that are in progress */
5846 + if (qm_dqrr_init(portal, config, qm_dqrr_dpush, qm_dqrr_pvb,
5847 + qm_dqrr_cdc, DQRR_MAXFILL)) {
5848 + pr_err("qm_dqrr_init() failed when trying to"
5849 + " recover portal, portal will be leaked\n");
5850 + return 1;
5851 + }
5852 +
5853 + /* Discard any entries on the DQRR */
5854 + /* If we consume the ring twice something is wrong */
5855 + for (i = 0; i < DQRR_MAXFILL * 2; i++) {
5856 + qm_dqrr_pvb_update(portal);
5857 + dqrr = qm_dqrr_current(portal);
5858 + if (!dqrr)
5859 + break;
5860 + qm_dqrr_cdc_consume_1ptr(portal, dqrr, 0);
5861 + qm_dqrr_pvb_update(portal);
5862 + qm_dqrr_next(portal);
5863 + }
5864 + /* Initialize the EQCR */
5865 + if (qm_eqcr_init(portal, qm_eqcr_pvb,
5866 + qm_eqcr_get_ci_stashing(portal), 1)) {
5867 + pr_err("Qman EQCR initialisation failed\n");
5868 + return 1;
5869 + }
5870 + /* initialize the MR */
5871 + if (qm_mr_init(portal, qm_mr_pvb, qm_mr_cci)) {
5872 + pr_err("Qman MR initialisation failed\n");
5873 + return 1;
5874 + }
5875 + qm_mr_pvb_update(portal);
5876 + while (qm_mr_current(portal)) {
5877 + qm_mr_next(portal);
5878 + qm_mr_cci_consume_to_current(portal);
5879 + qm_mr_pvb_update(portal);
5880 + }
5881 +
5882 + if (qm_mc_init(portal)) {
5883 + pr_err("Qman MC initialisation failed\n");
5884 + return 1;
5885 + }
5886 + return 0;
5887 +}
5888 +
5889 +static int init_bm_portal(struct bm_portal_config *config,
5890 + struct bm_portal *portal)
5891 +{
5892 + portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
5893 + portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
5894 +
5895 + if (bm_rcr_init(portal, bm_rcr_pvb, bm_rcr_cce)) {
5896 + pr_err("Bman RCR initialisation failed\n");
5897 + return 1;
5898 + }
5899 + if (bm_mc_init(portal)) {
5900 + pr_err("Bman MC initialisation failed\n");
5901 + return 1;
5902 + }
5903 + return 0;
5904 +}
5905 +
5906 +/* Function that will scan all FQs in the system. For each FQ that is not
5907 + OOS it will call the check_channel helper to determine if the FQ should
5908 + be torn down. If the check_channel helper returns true the FQ will be
5909 + transitioned to the OOS state */
5910 +static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
5911 + bool (*check_channel)(void*, u32))
5912 +{
5913 + u32 fq_id = 0;
5914 + while (1) {
5915 + struct qm_mc_command *mcc;
5916 + struct qm_mc_result *mcr;
5917 + u8 state;
5918 + u32 channel;
5919 +
5920 + /* Determine the channel for the FQID */
5921 + mcc = qm_mc_start(portal);
5922 + mcc->queryfq.fqid = fq_id;
5923 + qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);
5924 + while (!(mcr = qm_mc_result(portal)))
5925 + cpu_relax();
5926 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
5927 + == QM_MCR_VERB_QUERYFQ);
5928 + if (mcr->result != QM_MCR_RESULT_OK)
5929 + break; /* End of valid FQIDs */
5930 +
5931 + channel = mcr->queryfq.fqd.dest.channel;
5932 + /* Determine the state of the FQID */
5933 + mcc = qm_mc_start(portal);
5934 + mcc->queryfq_np.fqid = fq_id;
5935 + qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
5936 + while (!(mcr = qm_mc_result(portal)))
5937 + cpu_relax();
5938 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
5939 + == QM_MCR_VERB_QUERYFQ_NP);
5940 + state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
5941 + if (state == QM_MCR_NP_STATE_OOS)
5942 + /* Already OOS, no need to do anymore checks */
5943 + goto next;
5944 +
5945 + if (check_channel(ctx, channel))
5946 + qm_shutdown_fq(&portal, 1, fq_id);
5947 + next:
5948 + ++fq_id;
5949 + }
5950 + return 0;
5951 +}
5952 +
5953 +static bool check_channel_device(void *_ctx, u32 channel)
5954 +{
5955 + struct ctx *ctx = _ctx;
5956 + struct portal_mapping *portal, *tmpportal;
5957 + struct active_resource *res;
5958 +
5959 + /* See if the FQ is destined for one of the portals we're cleaning up */
5960 + list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
5961 + if (portal->user.type == usdpaa_portal_qman) {
5962 + if (portal->qportal->public_cfg.channel == channel) {
5963 +				/* This FQ's destination is a portal
5964 + we're cleaning, send a retire */
5965 + return true;
5966 + }
5967 + }
5968 + }
5969 +
5970 + /* Check the pool channels that will be released as well */
5971 + list_for_each_entry(res, &ctx->resources[usdpaa_id_qpool], list) {
5972 +		if ((res->id <= channel) &&
5973 +		    ((res->id + res->num - 1) >= channel))
5974 + return true;
5975 + }
5976 + return false;
5977 +}
5978 +
5979 +static bool check_portal_channel(void *ctx, u32 channel)
5980 +{
5981 + u32 portal_channel = *(u32 *)ctx;
5982 + if (portal_channel == channel) {
5983 +		/* This FQ's destination is a portal
5984 + we're cleaning, send a retire */
5985 + return true;
5986 + }
5987 + return false;
5988 +}
5989 +
5990 +
5991 +
5992 +
5993 +static int usdpaa_release(struct inode *inode, struct file *filp)
5994 +{
5995 + struct ctx *ctx = filp->private_data;
5996 + struct mem_mapping *map, *tmpmap;
5997 + struct portal_mapping *portal, *tmpportal;
5998 + const struct alloc_backend *backend = &alloc_backends[0];
5999 + struct active_resource *res;
6000 + struct qm_portal *qm_cleanup_portal = NULL;
6001 + struct bm_portal *bm_cleanup_portal = NULL;
6002 + struct qm_portal_config *qm_alloced_portal = NULL;
6003 + struct bm_portal_config *bm_alloced_portal = NULL;
6004 +
6005 + struct qm_portal *portal_array[qman_portal_max];
6006 + int portal_count = 0;
6007 +
6008 + /* Ensure the release operation cannot be migrated to another
6009 + CPU as CPU specific variables may be needed during cleanup */
6010 +#ifdef CONFIG_PREEMPT_RT_FULL
6011 + migrate_disable();
6012 +#endif
6013 + /* The following logic is used to recover resources that were not
6014 + correctly released by the process that is closing the FD.
6015 +	   Step 1: synchronize the HW with the qm_portal/bm_portal structures
6016 + in the kernel
6017 + */
6018 +
6019 + list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
6020 + /* Try to recover any portals that weren't shut down */
6021 + if (portal->user.type == usdpaa_portal_qman) {
6022 + portal_array[portal_count] = &portal->qman_portal_low;
6023 + ++portal_count;
6024 + init_qm_portal(portal->qportal,
6025 + &portal->qman_portal_low);
6026 + if (!qm_cleanup_portal) {
6027 + qm_cleanup_portal = &portal->qman_portal_low;
6028 + } else {
6029 + /* Clean FQs on the dedicated channel */
6030 + u32 chan = portal->qportal->public_cfg.channel;
6031 + qm_check_and_destroy_fqs(
6032 + &portal->qman_portal_low, &chan,
6033 + check_portal_channel);
6034 + }
6035 + } else {
6036 + /* BMAN */
6037 + init_bm_portal(portal->bportal,
6038 + &portal->bman_portal_low);
6039 + if (!bm_cleanup_portal)
6040 + bm_cleanup_portal = &portal->bman_portal_low;
6041 + }
6042 + }
6043 + /* If no portal was found, allocate one for cleanup */
6044 + if (!qm_cleanup_portal) {
6045 + qm_alloced_portal = qm_get_unused_portal();
6046 + if (!qm_alloced_portal) {
6047 +			pr_crit("No QMan portal available for cleanup\n");
6048 +#ifdef CONFIG_PREEMPT_RT_FULL
6049 + migrate_enable();
6050 +#endif
6051 + return -1;
6052 + }
6053 + qm_cleanup_portal = kmalloc(sizeof(struct qm_portal),
6054 + GFP_KERNEL);
6055 + if (!qm_cleanup_portal) {
6056 +#ifdef CONFIG_PREEMPT_RT_FULL
6057 + migrate_enable();
6058 +#endif
6059 + return -ENOMEM;
6060 + }
6061 + init_qm_portal(qm_alloced_portal, qm_cleanup_portal);
6062 + portal_array[portal_count] = qm_cleanup_portal;
6063 + ++portal_count;
6064 + }
6065 + if (!bm_cleanup_portal) {
6066 + bm_alloced_portal = bm_get_unused_portal();
6067 + if (!bm_alloced_portal) {
6068 +			pr_crit("No BMan portal available for cleanup\n");
6069 +#ifdef CONFIG_PREEMPT_RT_FULL
6070 + migrate_enable();
6071 +#endif
6072 + return -1;
6073 + }
6074 + bm_cleanup_portal = kmalloc(sizeof(struct bm_portal),
6075 + GFP_KERNEL);
6076 + if (!bm_cleanup_portal) {
6077 +#ifdef CONFIG_PREEMPT_RT_FULL
6078 + migrate_enable();
6079 +#endif
6080 + return -ENOMEM;
6081 + }
6082 + init_bm_portal(bm_alloced_portal, bm_cleanup_portal);
6083 + }
6084 +
6085 + /* OOS the FQs associated with this process */
6086 + qm_check_and_destroy_fqs(qm_cleanup_portal, ctx, check_channel_device);
6087 +
6088 + while (backend->id_type != usdpaa_id_max) {
6089 + int leaks = 0;
6090 + list_for_each_entry(res, &ctx->resources[backend->id_type],
6091 + list) {
6092 + if (backend->id_type == usdpaa_id_fqid) {
6093 + int i = 0;
6094 + for (; i < res->num; i++) {
6095 + /* Clean FQs with the cleanup portal */
6096 + qm_shutdown_fq(portal_array,
6097 + portal_count,
6098 + res->id + i);
6099 + }
6100 + }
6101 + leaks += res->num;
6102 + backend->release(res->id, res->num);
6103 + }
6104 + if (leaks)
6105 + pr_crit("USDPAA process leaking %d %s%s\n", leaks,
6106 + backend->acronym, (leaks > 1) ? "s" : "");
6107 + backend++;
6108 + }
6109 + /* Release any DMA regions */
6110 + spin_lock(&mem_lock);
6111 + list_for_each_entry_safe(map, tmpmap, &ctx->maps, list) {
6112 + struct mem_fragment *current_frag = map->root_frag;
6113 + int i;
6114 + if (map->root_frag->has_locking &&
6115 + (map->root_frag->owner == map)) {
6116 + map->root_frag->owner = NULL;
6117 + wake_up(&map->root_frag->wq);
6118 + }
6119 + /* Check each fragment and merge if the ref count is 0 */
6120 + for (i = 0; i < map->frag_count; i++) {
6121 + --current_frag->refs;
6122 + current_frag = list_entry(current_frag->list.prev,
6123 + struct mem_fragment, list);
6124 + }
6125 +
6126 + compress_frags();
6127 + list_del(&map->list);
6128 + kfree(map);
6129 + }
6130 + spin_unlock(&mem_lock);
6131 +
6132 + /* Return portals */
6133 + list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
6134 + if (portal->user.type == usdpaa_portal_qman) {
6135 + /* Give the portal back to the allocator */
6136 + init_qm_portal(portal->qportal,
6137 + &portal->qman_portal_low);
6138 + qm_put_unused_portal(portal->qportal);
6139 + } else {
6140 + init_bm_portal(portal->bportal,
6141 + &portal->bman_portal_low);
6142 + bm_put_unused_portal(portal->bportal);
6143 + }
6144 + list_del(&portal->list);
6145 + kfree(portal);
6146 + }
6147 + if (qm_alloced_portal) {
6148 + qm_put_unused_portal(qm_alloced_portal);
6149 + kfree(qm_cleanup_portal);
6150 + }
6151 + if (bm_alloced_portal) {
6152 + bm_put_unused_portal(bm_alloced_portal);
6153 + kfree(bm_cleanup_portal);
6154 + }
6155 +
6156 + kfree(ctx);
6157 +#ifdef CONFIG_PREEMPT_RT_FULL
6158 + migrate_enable();
6159 +#endif
6160 + return 0;
6161 +}
6162 +
6163 +static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma,
6164 + int *match, unsigned long *pfn)
6165 +{
6166 + struct mem_mapping *map;
6167 +
6168 + list_for_each_entry(map, &ctx->maps, list) {
6169 + int i;
6170 + struct mem_fragment *frag = map->root_frag;
6171 +
6172 + for (i = 0; i < map->frag_count; i++) {
6173 + if (frag->pfn_base == vma->vm_pgoff) {
6174 + *match = 1;
6175 + *pfn = frag->pfn_base;
6176 + return 0;
6177 + }
6178 + frag = list_entry(frag->list.next, struct mem_fragment,
6179 + list);
6180 + }
6181 + }
6182 + *match = 0;
6183 + return 0;
6184 +}
6185 +
6186 +static int check_mmap_resource(struct resource *res, struct vm_area_struct *vma,
6187 + int *match, unsigned long *pfn)
6188 +{
6189 + *pfn = res->start >> PAGE_SHIFT;
6190 + if (*pfn == vma->vm_pgoff) {
6191 + *match = 1;
6192 + if ((vma->vm_end - vma->vm_start) != resource_size(res))
6193 + return -EINVAL;
6194 + } else
6195 + *match = 0;
6196 + return 0;
6197 +}
6198 +
6199 +static int check_mmap_portal(struct ctx *ctx, struct vm_area_struct *vma,
6200 + int *match, unsigned long *pfn)
6201 +{
6202 + struct portal_mapping *portal;
6203 + int ret;
6204 +
6205 + list_for_each_entry(portal, &ctx->portals, list) {
6206 + ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CE], vma,
6207 + match, pfn);
6208 + if (*match) {
6209 + vma->vm_page_prot =
6210 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
6211 + pgprot_cached_ns(vma->vm_page_prot);
6212 +#else
6213 + pgprot_cached_noncoherent(vma->vm_page_prot);
6214 +#endif
6215 + return ret;
6216 + }
6217 + ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CI], vma,
6218 + match, pfn);
6219 + if (*match) {
6220 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
6221 + return ret;
6222 + }
6223 + }
6224 + *match = 0;
6225 + return 0;
6226 +}
6227 +
6228 +static int usdpaa_mmap(struct file *filp, struct vm_area_struct *vma)
6229 +{
6230 + struct ctx *ctx = filp->private_data;
6231 + unsigned long pfn = 0;
6232 + int match, ret;
6233 +
6234 + spin_lock(&mem_lock);
6235 + ret = check_mmap_dma(ctx, vma, &match, &pfn);
6236 + if (!match)
6237 + ret = check_mmap_portal(ctx, vma, &match, &pfn);
6238 + spin_unlock(&mem_lock);
6239 + if (!match)
6240 + return -EINVAL;
6241 + if (!ret)
6242 + ret = remap_pfn_range(vma, vma->vm_start, pfn,
6243 + vma->vm_end - vma->vm_start,
6244 + vma->vm_page_prot);
6245 + return ret;
6246 +}
6247 +
6248 +/* Return the nearest rounded-up address >= 'addr' that is 'sz'-aligned. 'sz'
6249 + * must be a power of 2, but both 'addr' and 'sz' can be expressions. */
6250 +#define USDPAA_MEM_ROUNDUP(addr, sz) \
6251 + ({ \
6252 + unsigned long foo_align = (sz) - 1; \
6253 + ((addr) + foo_align) & ~foo_align; \
6254 + })
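+/* e.g. USDPAA_MEM_ROUNDUP(0x5000, 0x4000) == 0x8000, while an address that is
+ * already 'sz'-aligned is returned unchanged. */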
6255 +/* Searching for a size-aligned virtual address range starting from 'addr' */
6256 +static unsigned long usdpaa_get_unmapped_area(struct file *file,
6257 + unsigned long addr,
6258 + unsigned long len,
6259 + unsigned long pgoff,
6260 + unsigned long flags)
6261 +{
6262 + struct vm_area_struct *vma;
6263 +
6264 + if (len % PAGE_SIZE)
6265 + return -EINVAL;
6266 + if (!len)
6267 + return -EINVAL;
6268 +
6269 + /* Need to align the address to the largest pagesize of the mapping
6270 + * because the MMU requires the virtual address to have the same
6271 + * alignment as the physical address */
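+	/* e.g. a 16MB mapping must start on a 16MB-aligned virtual address,
+	 * since largest_page_size(16MB) == 16MB. */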
6272 + addr = USDPAA_MEM_ROUNDUP(addr, largest_page_size(len));
6273 + vma = find_vma(current->mm, addr);
6274 + /* Keep searching until we reach the end of currently-used virtual
6275 + * address-space or we find a big enough gap. */
6276 + while (vma) {
6277 + if ((addr + len) < vma->vm_start)
6278 + return addr;
6279 +
6280 + addr = USDPAA_MEM_ROUNDUP(vma->vm_end, largest_page_size(len));
6281 + vma = vma->vm_next;
6282 + }
6283 + if ((TASK_SIZE - len) < addr)
6284 + return -ENOMEM;
6285 + return addr;
6286 +}
6287 +
6288 +static long ioctl_id_alloc(struct ctx *ctx, void __user *arg)
6289 +{
6290 + struct usdpaa_ioctl_id_alloc i;
6291 + const struct alloc_backend *backend;
6292 + struct active_resource *res;
6293 + int ret = copy_from_user(&i, arg, sizeof(i));
6294 + if (ret)
6295 + return ret;
6296 + if ((i.id_type >= usdpaa_id_max) || !i.num)
6297 + return -EINVAL;
6298 + backend = &alloc_backends[i.id_type];
6299 + /* Allocate the required resource type */
6300 + ret = backend->alloc(&i.base, i.num, i.align, i.partial);
6301 + if (ret < 0)
6302 + return ret;
6303 + i.num = ret;
6304 + /* Copy the result to user-space */
6305 + ret = copy_to_user(arg, &i, sizeof(i));
6306 + if (ret) {
6307 + backend->release(i.base, i.num);
6308 + return ret;
6309 + }
6310 + /* Assign the allocated range to the FD accounting */
6311 + res = kmalloc(sizeof(*res), GFP_KERNEL);
6312 + if (!res) {
6313 + backend->release(i.base, i.num);
6314 + return -ENOMEM;
6315 + }
6316 + spin_lock(&ctx->lock);
6317 + res->id = i.base;
6318 + res->num = i.num;
6319 + res->refcount = 1;
6320 + list_add(&res->list, &ctx->resources[i.id_type]);
6321 + spin_unlock(&ctx->lock);
6322 + return 0;
6323 +}
6324 +
6325 +static long ioctl_id_release(struct ctx *ctx, void __user *arg)
6326 +{
6327 + struct usdpaa_ioctl_id_release i;
6328 + const struct alloc_backend *backend;
6329 + struct active_resource *tmp, *pos;
6330 +
6331 + int ret = copy_from_user(&i, arg, sizeof(i));
6332 + if (ret)
6333 + return ret;
6334 + if ((i.id_type >= usdpaa_id_max) || !i.num)
6335 + return -EINVAL;
6336 + backend = &alloc_backends[i.id_type];
6337 + /* Pull the range out of the FD accounting - the range is valid iff this
6338 + * succeeds. */
6339 + spin_lock(&ctx->lock);
6340 + list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
6341 + if (pos->id == i.base && pos->num == i.num) {
6342 + pos->refcount--;
6343 + if (pos->refcount) {
6344 + spin_unlock(&ctx->lock);
6345 + return 0; /* Still being used */
6346 + }
6347 + list_del(&pos->list);
6348 + kfree(pos);
6349 + spin_unlock(&ctx->lock);
6350 + goto found;
6351 + }
6352 + }
6353 + /* Failed to find the resource */
6354 + spin_unlock(&ctx->lock);
6355 + pr_err("Couldn't find resource type %d base 0x%x num %d\n",
6356 + i.id_type, i.base, i.num);
6357 + return -EINVAL;
6358 +found:
6359 + /* Release the resource to the backend */
6360 + backend->release(i.base, i.num);
6361 + return 0;
6362 +}
6363 +
6364 +static long ioctl_id_reserve(struct ctx *ctx, void __user *arg)
6365 +{
6366 + struct usdpaa_ioctl_id_reserve i;
6367 + const struct alloc_backend *backend;
6368 + struct active_resource *tmp, *pos;
6369 +
6370 + int ret = copy_from_user(&i, arg, sizeof(i));
6371 + if (ret)
6372 + return ret;
6373 + if ((i.id_type >= usdpaa_id_max) || !i.num)
6374 + return -EINVAL;
6375 + backend = &alloc_backends[i.id_type];
6376 + if (!backend->reserve)
6377 + return -EINVAL;
6378 + /* Pull the range out of the FD accounting - the range is valid iff this
6379 + * succeeds. */
6380 + spin_lock(&ctx->lock);
6381 + list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
6382 + if (pos->id == i.base && pos->num == i.num) {
6383 + pos->refcount++;
6384 + spin_unlock(&ctx->lock);
6385 + return 0;
6386 + }
6387 + }
6388 +
6389 + /* Failed to find the resource */
6390 + spin_unlock(&ctx->lock);
6391 +
6392 + /* Reserve the resource in the backend */
6393 + ret = backend->reserve(i.base, i.num);
6394 + if (ret)
6395 + return ret;
6396 + /* Assign the reserved range to the FD accounting */
6397 + pos = kmalloc(sizeof(*pos), GFP_KERNEL);
6398 + if (!pos) {
6399 + backend->release(i.base, i.num);
6400 + return -ENOMEM;
6401 + }
6402 + spin_lock(&ctx->lock);
6403 + pos->id = i.base;
6404 + pos->num = i.num;
6405 + pos->refcount = 1;
6406 + list_add(&pos->list, &ctx->resources[i.id_type]);
6407 + spin_unlock(&ctx->lock);
6408 + return 0;
6409 +}
6410 +
6411 +static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
6412 + struct usdpaa_ioctl_dma_map *i)
6413 +{
6414 + struct mem_fragment *frag, *start_frag, *next_frag;
6415 + struct mem_mapping *map, *tmp;
6416 + int ret = 0;
6417 + u32 largest_page, so_far = 0;
6418 + int frag_count = 0;
6419 + unsigned long next_addr = PAGE_SIZE, populate;
6420 +
6421 + /* error checking to ensure values copied from user space are valid */
6422 + if (i->len % PAGE_SIZE)
6423 + return -EINVAL;
6424 +
6425 + map = kmalloc(sizeof(*map), GFP_KERNEL);
6426 + if (!map)
6427 + return -ENOMEM;
6428 +
6429 + spin_lock(&mem_lock);
6430 + if (i->flags & USDPAA_DMA_FLAG_SHARE) {
6431 + list_for_each_entry(frag, &mem_list, list) {
6432 + if (frag->refs && (frag->flags &
6433 + USDPAA_DMA_FLAG_SHARE) &&
6434 + !strncmp(i->name, frag->name,
6435 + USDPAA_DMA_NAME_MAX)) {
6436 + /* Matching entry */
6437 + if ((i->flags & USDPAA_DMA_FLAG_CREATE) &&
6438 + !(i->flags & USDPAA_DMA_FLAG_LAZY)) {
6439 + ret = -EBUSY;
6440 + goto out;
6441 + }
6442 +
6443 +				/* Check to ensure size matches record */
6444 +				if (i->len != frag->map_len && i->len) {
6445 +					pr_err("ioctl_dma_map() Size requested does not match %s and is non-zero\n",
6446 +						frag->name);
6447 +					ret = -EINVAL;
+					goto out;
6448 +				}
6449 +
6450 + /* Check if this has already been mapped
6451 + to this process */
6452 + list_for_each_entry(tmp, &ctx->maps, list)
6453 + if (tmp->root_frag == frag) {
6454 + /* Already mapped, just need to
6455 + inc ref count */
6456 + tmp->refs++;
6457 + kfree(map);
6458 + i->did_create = 0;
6459 + i->len = tmp->total_size;
6460 + i->phys_addr = frag->base;
6461 + i->ptr = tmp->virt_addr;
6462 + spin_unlock(&mem_lock);
6463 + return 0;
6464 + }
6465 + /* Matching entry - just need to map */
6466 + i->has_locking = frag->has_locking;
6467 + i->did_create = 0;
6468 + i->len = frag->map_len;
6469 + start_frag = frag;
6470 + goto do_map;
6471 + }
6472 + }
6473 + /* No matching entry */
6474 + if (!(i->flags & USDPAA_DMA_FLAG_CREATE)) {
6475 + pr_err("ioctl_dma_map() No matching entry\n");
6476 + ret = -ENOMEM;
6477 + goto out;
6478 + }
6479 + }
6480 + /* New fragment required, size must be provided. */
6481 + if (!i->len) {
6482 + ret = -EINVAL;
6483 + goto out;
6484 + }
6485 +
6486 +	/* Find one or more contiguous fragments that satisfy the total length,
6487 +	   trying to minimize the number of fragments; compute the largest page
6488 +	   size that the allocation could use */
6489 + largest_page = largest_page_size(i->len);
6490 + start_frag = NULL;
6491 + while (largest_page &&
6492 + largest_page <= largest_page_size(phys_size) &&
6493 + start_frag == NULL) {
6494 + /* Search the list for a frag of that size */
6495 + list_for_each_entry(frag, &mem_list, list) {
6496 + if (!frag->refs && (frag->len == largest_page)) {
6497 +				/* See if the following fragments are free
6498 +				   and can accommodate the size */
6499 + u32 found_size = largest_page;
6500 + next_frag = list_entry(frag->list.prev,
6501 + struct mem_fragment,
6502 + list);
6503 +				/* If the fragment is too small, check
6504 +				   whether its neighbours can support it */
6505 + while (found_size < i->len) {
6506 + if (&mem_list == &next_frag->list)
6507 + break; /* End of list */
6508 + if (next_frag->refs != 0 ||
6509 + next_frag->len == 0)
6510 + break; /* not enough space */
6511 + found_size += next_frag->len;
6512 + next_frag = list_entry(
6513 + next_frag->list.prev,
6514 + struct mem_fragment,
6515 + list);
6516 + }
6517 + if (found_size >= i->len) {
6518 +					/* Success! There is enough contiguous
6519 +					   free space */
6520 + start_frag = frag;
6521 + break;
6522 + }
6523 + }
6524 + } /* next frag loop */
6525 +		/* Couldn't satisfy the request with this
6526 +		   largest page size, try a smaller one */
6527 + largest_page <<= 2;
6528 + }
6529 + if (start_frag == NULL) {
6530 + /* Couldn't find proper amount of space */
6531 + ret = -ENOMEM;
6532 + goto out;
6533 + }
6534 + i->did_create = 1;
6535 +do_map:
6536 + /* Verify there is sufficient space to do the mapping */
6537 + down_write(&current->mm->mmap_sem);
6538 + next_addr = usdpaa_get_unmapped_area(fp, next_addr, i->len, 0, 0);
6539 + up_write(&current->mm->mmap_sem);
6540 +
6541 + if (next_addr & ~PAGE_MASK) {
6542 + ret = -ENOMEM;
6543 + goto out;
6544 + }
6545 +
6546 +	/* We may need to divide the final fragment to accommodate the mapping */
6547 + next_frag = start_frag;
6548 + while (so_far != i->len) {
6549 + BUG_ON(next_frag->len == 0);
6550 + while ((next_frag->len + so_far) > i->len) {
6551 + /* Split frag until they match */
6552 + split_frag(next_frag);
6553 + }
6554 + so_far += next_frag->len;
6555 + next_frag->refs++;
6556 + ++frag_count;
6557 + next_frag = list_entry(next_frag->list.prev,
6558 + struct mem_fragment, list);
6559 + }
6560 + if (i->did_create) {
6561 + size_t name_len = 0;
6562 + start_frag->flags = i->flags;
6563 + strncpy(start_frag->name, i->name, USDPAA_DMA_NAME_MAX);
6564 + name_len = strnlen(start_frag->name, USDPAA_DMA_NAME_MAX);
6565 + if (name_len >= USDPAA_DMA_NAME_MAX) {
6566 + ret = -EFAULT;
6567 + goto out;
6568 + }
6569 + start_frag->map_len = i->len;
6570 + start_frag->has_locking = i->has_locking;
6571 + init_waitqueue_head(&start_frag->wq);
6572 + start_frag->owner = NULL;
6573 + }
6574 +
6575 + /* Setup the map entry */
6576 + map->root_frag = start_frag;
6577 + map->total_size = i->len;
6578 + map->frag_count = frag_count;
6579 + map->refs = 1;
6580 + list_add(&map->list, &ctx->maps);
6581 + i->phys_addr = start_frag->base;
6582 +out:
6583 + spin_unlock(&mem_lock);
6584 +
6585 + if (!ret) {
6586 + unsigned long longret;
6587 + down_write(&current->mm->mmap_sem);
6588 + longret = do_mmap_pgoff(fp, next_addr, map->total_size,
6589 + PROT_READ |
6590 + (i->flags &
6591 + USDPAA_DMA_FLAG_RDONLY ? 0
6592 + : PROT_WRITE),
6593 + MAP_SHARED,
6594 + start_frag->pfn_base,
6595 + &populate,
6596 + NULL);
6597 + up_write(&current->mm->mmap_sem);
6598 + if (longret & ~PAGE_MASK) {
6599 + ret = (int)longret;
6600 + } else {
6601 + i->ptr = (void *)longret;
6602 + map->virt_addr = i->ptr;
6603 + }
6604 + } else
6605 + kfree(map);
6606 + return ret;
6607 +}
6608 +
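+/* Undo a previous DMA map for this process: drop the per-map reference and,
+ * once the last reference goes away, release the underlying fragments back to
+ * mem_list (coalescing them via compress_frags()) and munmap the VMA. */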
6609 +static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg)
6610 +{
6611 + struct mem_mapping *map;
6612 + struct vm_area_struct *vma;
6613 + int ret, i;
6614 + struct mem_fragment *current_frag;
6615 + size_t sz;
6616 + unsigned long base;
6617 + unsigned long vaddr;
6618 +
6619 + down_write(&current->mm->mmap_sem);
6620 + vma = find_vma(current->mm, (unsigned long)arg);
6621 + if (!vma || (vma->vm_start > (unsigned long)arg)) {
6622 + up_write(&current->mm->mmap_sem);
6623 + return -EFAULT;
6624 + }
6625 + spin_lock(&mem_lock);
6626 + list_for_each_entry(map, &ctx->maps, list) {
6627 + if (map->root_frag->pfn_base == vma->vm_pgoff) {
6628 + /* Drop the map lock if we hold it */
6629 + if (map->root_frag->has_locking &&
6630 + (map->root_frag->owner == map)) {
6631 + map->root_frag->owner = NULL;
6632 + wake_up(&map->root_frag->wq);
6633 + }
6634 + goto map_match;
6635 + }
6636 + }
6637 + /* Failed to find a matching mapping for this process */
6638 + ret = -EFAULT;
6639 + spin_unlock(&mem_lock);
6640 + goto out;
6641 +map_match:
6642 + map->refs--;
6643 + if (map->refs != 0) {
6644 +		/* Another call to dma_map is still referencing this mapping */
6645 + ret = 0;
6646 + spin_unlock(&mem_lock);
6647 + goto out;
6648 + }
6649 +
6650 + current_frag = map->root_frag;
6651 + vaddr = (unsigned long) map->virt_addr;
6652 + for (i = 0; i < map->frag_count; i++) {
6653 + DPA_ASSERT(current_frag->refs > 0);
6654 + --current_frag->refs;
6655 +#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
6656 + /*
6657 + * Make sure we invalidate the TLB entry for
6658 + * this fragment, otherwise a remap of a different
6659 +		 * page to this vaddr would give access to an
6660 + * incorrect piece of memory
6661 + */
6662 + cleartlbcam(vaddr, mfspr(SPRN_PID));
6663 +#endif
6664 + vaddr += current_frag->len;
6665 + current_frag = list_entry(current_frag->list.prev,
6666 + struct mem_fragment, list);
6667 + }
6668 + map->root_frag->name[0] = 0;
6669 + list_del(&map->list);
6670 + compress_frags();
6671 + spin_unlock(&mem_lock);
6672 +
6673 + base = vma->vm_start;
6674 + sz = vma->vm_end - vma->vm_start;
6675 + do_munmap(current->mm, base, sz, NULL);
6676 + ret = 0;
6677 + out:
6678 + up_write(&current->mm->mmap_sem);
6679 + return ret;
6680 +}
6681 +
6682 +static long ioctl_dma_stats(struct ctx *ctx, void __user *arg)
6683 +{
6684 + struct mem_fragment *frag;
6685 + struct usdpaa_ioctl_dma_used result;
6686 +
6687 + result.free_bytes = 0;
6688 + result.total_bytes = phys_size;
6689 +
6690 + list_for_each_entry(frag, &mem_list, list) {
6691 + if (frag->refs == 0)
6692 + result.free_bytes += frag->len;
6693 + }
6694 +
6695 +	return copy_to_user(arg, &result, sizeof(result));
+}
6696 +
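+/* Simple ownership protocol for shared DMA regions created with has_locking
+ * set: DMA_LOCK sleeps until the root fragment has no owner and then claims
+ * it, DMA_UNLOCK releases ownership and wakes any waiters. */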
6697 +static int test_lock(struct mem_mapping *map)
6698 +{
6699 + int ret = 0;
6700 + spin_lock(&mem_lock);
6701 + if (!map->root_frag->owner) {
6702 + map->root_frag->owner = map;
6703 + ret = 1;
6704 + }
6705 + spin_unlock(&mem_lock);
6706 + return ret;
6707 +}
6708 +
6709 +static long ioctl_dma_lock(struct ctx *ctx, void __user *arg)
6710 +{
6711 + struct mem_mapping *map;
6712 + struct vm_area_struct *vma;
6713 +
6714 + down_read(&current->mm->mmap_sem);
6715 + vma = find_vma(current->mm, (unsigned long)arg);
6716 + if (!vma || (vma->vm_start > (unsigned long)arg)) {
6717 + up_read(&current->mm->mmap_sem);
6718 + return -EFAULT;
6719 + }
6720 + spin_lock(&mem_lock);
6721 + list_for_each_entry(map, &ctx->maps, list) {
6722 + if (map->root_frag->pfn_base == vma->vm_pgoff)
6723 + goto map_match;
6724 + }
6725 + map = NULL;
6726 +map_match:
6727 + spin_unlock(&mem_lock);
6728 + up_read(&current->mm->mmap_sem);
6729 +
6730 + if (!map)
6731 + return -EFAULT;
6732 + if (!map->root_frag->has_locking)
6733 + return -ENODEV;
6734 + return wait_event_interruptible(map->root_frag->wq, test_lock(map));
6735 +}
6736 +
6737 +static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg)
6738 +{
6739 + struct mem_mapping *map;
6740 + struct vm_area_struct *vma;
6741 + int ret;
6742 +
6743 + down_read(&current->mm->mmap_sem);
6744 + vma = find_vma(current->mm, (unsigned long)arg);
6745 + if (!vma || (vma->vm_start > (unsigned long)arg))
6746 + ret = -EFAULT;
6747 + else {
6748 + spin_lock(&mem_lock);
6749 + list_for_each_entry(map, &ctx->maps, list) {
6750 + if (map->root_frag->pfn_base == vma->vm_pgoff) {
6751 + if (!map->root_frag->has_locking)
6752 + ret = -ENODEV;
6753 + else if (map->root_frag->owner == map) {
6754 + map->root_frag->owner = NULL;
6755 + wake_up(&map->root_frag->wq);
6756 + ret = 0;
6757 + } else
6758 + ret = -EBUSY;
6759 + goto map_match;
6760 + }
6761 + }
6762 + ret = -EINVAL;
6763 +map_match:
6764 + spin_unlock(&mem_lock);
6765 + }
6766 + up_read(&current->mm->mmap_sem);
6767 + return ret;
6768 +}
6769 +
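+/* Map one of a portal's register regions (cache-enabled or cache-inhibited)
+ * into the current process; the mapping offset is the resource's physical
+ * start page. */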
6770 +static int portal_mmap(struct file *fp, struct resource *res, void **ptr)
6771 +{
6772 + unsigned long longret = 0, populate;
6773 + resource_size_t len;
6774 +
6775 +	len = resource_size(res);
6776 +	if (len != (unsigned long)len)
6777 +		return -EINVAL;
6778 +	down_write(&current->mm->mmap_sem);
6779 + longret = do_mmap_pgoff(fp, PAGE_SIZE, (unsigned long)len,
6780 + PROT_READ | PROT_WRITE, MAP_SHARED,
6781 + res->start >> PAGE_SHIFT, &populate, NULL);
6782 + up_write(&current->mm->mmap_sem);
6783 +
6784 + if (longret & ~PAGE_MASK)
6785 + return (int)longret;
6786 +
6787 + *ptr = (void *) longret;
6788 + return 0;
6789 +}
6790 +
6791 +static void portal_munmap(struct resource *res, void *ptr)
6792 +{
6793 + down_write(&current->mm->mmap_sem);
6794 + do_munmap(current->mm, (unsigned long)ptr, resource_size(res), NULL);
6795 + up_write(&current->mm->mmap_sem);
6796 +}
6797 +
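+/* Claim an unused QMan or BMan portal for this file descriptor and map its
+ * cache-enabled (CE) and cache-inhibited (CI) register windows into the
+ * caller's address space; on failure the portal is returned to the pool. */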
6798 +static long ioctl_portal_map(struct file *fp, struct ctx *ctx,
6799 + struct usdpaa_ioctl_portal_map *arg)
6800 +{
6801 + struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
6802 + int ret;
6803 +
6804 + if (!mapping)
6805 + return -ENOMEM;
6806 +
6807 + mapping->user = *arg;
6808 + mapping->iommu_domain = NULL;
6809 +
6810 + if (mapping->user.type == usdpaa_portal_qman) {
6811 + mapping->qportal =
6812 + qm_get_unused_portal_idx(mapping->user.index);
6813 + if (!mapping->qportal) {
6814 + ret = -ENODEV;
6815 + goto err_get_portal;
6816 + }
6817 + mapping->phys = &mapping->qportal->addr_phys[0];
6818 + mapping->user.channel = mapping->qportal->public_cfg.channel;
6819 + mapping->user.pools = mapping->qportal->public_cfg.pools;
6820 + mapping->user.index = mapping->qportal->public_cfg.index;
6821 + } else if (mapping->user.type == usdpaa_portal_bman) {
6822 + mapping->bportal =
6823 + bm_get_unused_portal_idx(mapping->user.index);
6824 + if (!mapping->bportal) {
6825 + ret = -ENODEV;
6826 + goto err_get_portal;
6827 + }
6828 + mapping->phys = &mapping->bportal->addr_phys[0];
6829 + mapping->user.index = mapping->bportal->public_cfg.index;
6830 + } else {
6831 + ret = -EINVAL;
6832 + goto err_copy_from_user;
6833 + }
6834 + /* Need to put pcfg in ctx's list before the mmaps because the mmap
6835 + * handlers look it up. */
6836 + spin_lock(&mem_lock);
6837 + list_add(&mapping->list, &ctx->portals);
6838 + spin_unlock(&mem_lock);
6839 + ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CE],
6840 + &mapping->user.addr.cena);
6841 + if (ret)
6842 + goto err_mmap_cena;
6843 + ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CI],
6844 + &mapping->user.addr.cinh);
6845 + if (ret)
6846 + goto err_mmap_cinh;
6847 + *arg = mapping->user;
6848 + return ret;
6849 +
6850 +err_mmap_cinh:
6851 + portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
6852 +err_mmap_cena:
6853 + if ((mapping->user.type == usdpaa_portal_qman) && mapping->qportal)
6854 + qm_put_unused_portal(mapping->qportal);
6855 + else if ((mapping->user.type == usdpaa_portal_bman) && mapping->bportal)
6856 + bm_put_unused_portal(mapping->bportal);
6857 + spin_lock(&mem_lock);
6858 + list_del(&mapping->list);
6859 + spin_unlock(&mem_lock);
6860 +err_get_portal:
6861 +err_copy_from_user:
6862 + kfree(mapping);
6863 + return ret;
6864 +}
6865 +
6866 +static long ioctl_portal_unmap(struct ctx *ctx, struct usdpaa_portal_map *i)
6867 +{
6868 + struct portal_mapping *mapping;
6869 + struct vm_area_struct *vma;
6870 + unsigned long pfn;
6871 + u32 channel;
6872 +
6873 + /* Get the PFN corresponding to one of the virt addresses */
6874 + down_read(&current->mm->mmap_sem);
6875 + vma = find_vma(current->mm, (unsigned long)i->cinh);
6876 + if (!vma || (vma->vm_start > (unsigned long)i->cinh)) {
6877 + up_read(&current->mm->mmap_sem);
6878 + return -EFAULT;
6879 + }
6880 + pfn = vma->vm_pgoff;
6881 + up_read(&current->mm->mmap_sem);
6882 +
6883 + /* Find the corresponding portal */
6884 + spin_lock(&mem_lock);
6885 + list_for_each_entry(mapping, &ctx->portals, list) {
6886 + if (pfn == (mapping->phys[DPA_PORTAL_CI].start >> PAGE_SHIFT))
6887 + goto found;
6888 + }
6889 + mapping = NULL;
6890 +found:
6891 + if (mapping)
6892 + list_del(&mapping->list);
6893 + spin_unlock(&mem_lock);
6894 + if (!mapping)
6895 + return -ENODEV;
6896 + portal_munmap(&mapping->phys[DPA_PORTAL_CI], mapping->user.addr.cinh);
6897 + portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
6898 + if (mapping->user.type == usdpaa_portal_qman) {
6899 + init_qm_portal(mapping->qportal,
6900 + &mapping->qman_portal_low);
6901 +
6902 + /* Tear down any FQs this portal is referencing */
6903 + channel = mapping->qportal->public_cfg.channel;
6904 + qm_check_and_destroy_fqs(&mapping->qman_portal_low,
6905 + &channel,
6906 + check_portal_channel);
6907 + qm_put_unused_portal(mapping->qportal);
6908 + } else if (mapping->user.type == usdpaa_portal_bman) {
6909 + init_bm_portal(mapping->bportal,
6910 + &mapping->bman_portal_low);
6911 + bm_put_unused_portal(mapping->bportal);
6912 + }
6913 + kfree(mapping);
6914 + return 0;
6915 +}
6916 +
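+/* When built with PAMU support, attach the portal device to its own IOMMU
+ * domain, program the PAMU stash attributes (cpu/cache) and a 36-bit DMA
+ * window for it. The QMan stash destination (sdest) is programmed whether or
+ * not PAMU is available. */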
6917 +static void portal_config_pamu(struct qm_portal_config *pcfg, uint8_t sdest,
6918 + uint32_t cpu, uint32_t cache, uint32_t window)
6919 +{
6920 +#ifdef CONFIG_FSL_PAMU
6921 + int ret;
6922 + int window_count = 1;
6923 + struct iommu_domain_geometry geom_attr;
6924 + struct pamu_stash_attribute stash_attr;
6925 +
6926 + pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
6927 + if (!pcfg->iommu_domain) {
6928 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
6929 + __func__);
6930 + goto _no_iommu;
6931 + }
6932 + geom_attr.aperture_start = 0;
6933 + geom_attr.aperture_end =
6934 + ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
6935 + geom_attr.force_aperture = true;
6936 + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
6937 + &geom_attr);
6938 + if (ret < 0) {
6939 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
6940 + __func__, ret);
6941 + goto _iommu_domain_free;
6942 + }
6943 + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
6944 + &window_count);
6945 + if (ret < 0) {
6946 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
6947 + __func__, ret);
6948 + goto _iommu_domain_free;
6949 + }
6950 + stash_attr.cpu = cpu;
6951 + stash_attr.cache = cache;
6952 + /* set stash information for the window */
6953 + stash_attr.window = 0;
6954 +
6955 + ret = iommu_domain_set_attr(pcfg->iommu_domain,
6956 + DOMAIN_ATTR_FSL_PAMU_STASH,
6957 + &stash_attr);
6958 + if (ret < 0) {
6959 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
6960 + __func__, ret);
6961 + goto _iommu_domain_free;
6962 + }
6963 + ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
6964 + IOMMU_READ | IOMMU_WRITE);
6965 + if (ret < 0) {
6966 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
6967 + __func__, ret);
6968 + goto _iommu_domain_free;
6969 + }
6970 + ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
6971 + if (ret < 0) {
6972 + pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
6973 + __func__, ret);
6974 + goto _iommu_domain_free;
6975 + }
6976 + ret = iommu_domain_set_attr(pcfg->iommu_domain,
6977 + DOMAIN_ATTR_FSL_PAMU_ENABLE,
6978 + &window_count);
6979 + if (ret < 0) {
6980 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
6981 + __func__, ret);
6982 + goto _iommu_detach_device;
6983 + }
6984 +_no_iommu:
6985 +#endif
6986 +
6987 +#ifdef CONFIG_FSL_QMAN_CONFIG
6988 + if (qman_set_sdest(pcfg->public_cfg.channel, sdest))
6989 +#endif
6990 + pr_warn("Failed to set QMan portal's stash request queue\n");
6991 +
6992 + return;
6993 +
6994 +#ifdef CONFIG_FSL_PAMU
6995 +_iommu_detach_device:
6996 + iommu_detach_device(pcfg->iommu_domain, NULL);
6997 +_iommu_domain_free:
6998 + iommu_domain_free(pcfg->iommu_domain);
6999 +#endif
7000 +}
7001 +
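+/* "Raw" portal allocation: hand the physical CE/CI addresses of an unused
+ * portal back to user space without mapping them here (the caller performs
+ * its own mmap), optionally programming PAMU stashing for QMan portals. */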
7002 +static long ioctl_allocate_raw_portal(struct file *fp, struct ctx *ctx,
7003 + struct usdpaa_ioctl_raw_portal *arg)
7004 +{
7005 + struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
7006 + int ret;
7007 +
7008 + if (!mapping)
7009 + return -ENOMEM;
7010 +
7011 + mapping->user.type = arg->type;
7012 + mapping->iommu_domain = NULL;
7013 + if (arg->type == usdpaa_portal_qman) {
7014 + mapping->qportal = qm_get_unused_portal_idx(arg->index);
7015 + if (!mapping->qportal) {
7016 + ret = -ENODEV;
7017 + goto err;
7018 + }
7019 + mapping->phys = &mapping->qportal->addr_phys[0];
7020 + arg->index = mapping->qportal->public_cfg.index;
7021 + arg->cinh = mapping->qportal->addr_phys[DPA_PORTAL_CI].start;
7022 + arg->cena = mapping->qportal->addr_phys[DPA_PORTAL_CE].start;
7023 + if (arg->enable_stash) {
7024 + /* Setup the PAMU with the supplied parameters */
7025 + portal_config_pamu(mapping->qportal, arg->sdest,
7026 + arg->cpu, arg->cache, arg->window);
7027 + }
7028 + } else if (mapping->user.type == usdpaa_portal_bman) {
7029 + mapping->bportal =
7030 + bm_get_unused_portal_idx(arg->index);
7031 + if (!mapping->bportal) {
7032 + ret = -ENODEV;
7033 + goto err;
7034 + }
7035 + mapping->phys = &mapping->bportal->addr_phys[0];
7036 + arg->index = mapping->bportal->public_cfg.index;
7037 + arg->cinh = mapping->bportal->addr_phys[DPA_PORTAL_CI].start;
7038 + arg->cena = mapping->bportal->addr_phys[DPA_PORTAL_CE].start;
7039 + } else {
7040 + ret = -EINVAL;
7041 + goto err;
7042 + }
7043 + /* Need to put pcfg in ctx's list before the mmaps because the mmap
7044 + * handlers look it up. */
7045 + spin_lock(&mem_lock);
7046 + list_add(&mapping->list, &ctx->portals);
7047 + spin_unlock(&mem_lock);
7048 + return 0;
7049 +err:
7050 + kfree(mapping);
7051 + return ret;
7052 +}
7053 +
7054 +static long ioctl_free_raw_portal(struct file *fp, struct ctx *ctx,
7055 + struct usdpaa_ioctl_raw_portal *arg)
7056 +{
7057 + struct portal_mapping *mapping;
7058 + u32 channel;
7059 +
7060 + /* Find the corresponding portal */
7061 + spin_lock(&mem_lock);
7062 + list_for_each_entry(mapping, &ctx->portals, list) {
7063 + if (mapping->phys[DPA_PORTAL_CI].start == arg->cinh)
7064 + goto found;
7065 + }
7066 + mapping = NULL;
7067 +found:
7068 + if (mapping)
7069 + list_del(&mapping->list);
7070 + spin_unlock(&mem_lock);
7071 + if (!mapping)
7072 + return -ENODEV;
7073 + if (mapping->user.type == usdpaa_portal_qman) {
7074 + init_qm_portal(mapping->qportal,
7075 + &mapping->qman_portal_low);
7076 +
7077 + /* Tear down any FQs this portal is referencing */
7078 + channel = mapping->qportal->public_cfg.channel;
7079 + qm_check_and_destroy_fqs(&mapping->qman_portal_low,
7080 + &channel,
7081 + check_portal_channel);
7082 + qm_put_unused_portal(mapping->qportal);
7083 + } else if (mapping->user.type == usdpaa_portal_bman) {
7084 + init_bm_portal(mapping->bportal,
7085 + &mapping->bman_portal_low);
7086 + bm_put_unused_portal(mapping->bportal);
7087 + }
7088 + kfree(mapping);
7089 + return 0;
7090 +}
7091 +
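+/* Native ioctl() dispatch. Illustrative user-space usage of the DMA map
+ * command (device path and values are examples only):
+ *
+ *   int fd = open("/dev/fsl-usdpaa", O_RDWR);
+ *   struct usdpaa_ioctl_dma_map map;
+ *   memset(&map, 0, sizeof(map));
+ *   map.len = 0x100000;
+ *   map.flags = USDPAA_DMA_FLAG_SHARE | USDPAA_DMA_FLAG_CREATE;
+ *   strcpy(map.name, "example_shmem");
+ *   if (!ioctl(fd, USDPAA_IOCTL_DMA_MAP, &map))
+ *           ... on success map.ptr and map.phys_addr describe the region ...
+ */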
7092 +static long usdpaa_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
7093 +{
7094 + struct ctx *ctx = fp->private_data;
7095 + void __user *a = (void __user *)arg;
7096 + switch (cmd) {
7097 + case USDPAA_IOCTL_ID_ALLOC:
7098 + return ioctl_id_alloc(ctx, a);
7099 + case USDPAA_IOCTL_ID_RELEASE:
7100 + return ioctl_id_release(ctx, a);
7101 + case USDPAA_IOCTL_ID_RESERVE:
7102 + return ioctl_id_reserve(ctx, a);
7103 + case USDPAA_IOCTL_DMA_MAP:
7104 + {
7105 + struct usdpaa_ioctl_dma_map input;
7106 + int ret;
7107 + if (copy_from_user(&input, a, sizeof(input)))
7108 + return -EFAULT;
7109 + ret = ioctl_dma_map(fp, ctx, &input);
7110 + if (copy_to_user(a, &input, sizeof(input)))
7111 + return -EFAULT;
7112 + return ret;
7113 + }
7114 + case USDPAA_IOCTL_DMA_UNMAP:
7115 + return ioctl_dma_unmap(ctx, a);
7116 + case USDPAA_IOCTL_DMA_LOCK:
7117 + return ioctl_dma_lock(ctx, a);
7118 + case USDPAA_IOCTL_DMA_UNLOCK:
7119 + return ioctl_dma_unlock(ctx, a);
7120 + case USDPAA_IOCTL_PORTAL_MAP:
7121 + {
7122 + struct usdpaa_ioctl_portal_map input;
7123 + int ret;
7124 + if (copy_from_user(&input, a, sizeof(input)))
7125 + return -EFAULT;
7126 + ret = ioctl_portal_map(fp, ctx, &input);
7127 + if (copy_to_user(a, &input, sizeof(input)))
7128 + return -EFAULT;
7129 + return ret;
7130 + }
7131 + case USDPAA_IOCTL_PORTAL_UNMAP:
7132 + {
7133 + struct usdpaa_portal_map input;
7134 + if (copy_from_user(&input, a, sizeof(input)))
7135 + return -EFAULT;
7136 + return ioctl_portal_unmap(ctx, &input);
7137 + }
7138 + case USDPAA_IOCTL_DMA_USED:
7139 + return ioctl_dma_stats(ctx, a);
7140 + case USDPAA_IOCTL_ALLOC_RAW_PORTAL:
7141 + {
7142 + struct usdpaa_ioctl_raw_portal input;
7143 + int ret;
7144 + if (copy_from_user(&input, a, sizeof(input)))
7145 + return -EFAULT;
7146 + ret = ioctl_allocate_raw_portal(fp, ctx, &input);
7147 + if (copy_to_user(a, &input, sizeof(input)))
7148 + return -EFAULT;
7149 + return ret;
7150 + }
7151 + case USDPAA_IOCTL_FREE_RAW_PORTAL:
7152 + {
7153 + struct usdpaa_ioctl_raw_portal input;
7154 + if (copy_from_user(&input, a, sizeof(input)))
7155 + return -EFAULT;
7156 + return ioctl_free_raw_portal(fp, ctx, &input);
7157 + }
7158 + }
7159 + return -EINVAL;
7160 +}
7161 +
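+/* 32-bit compat dispatch: convert the compat structure layouts (pointers via
+ * compat_ptr()/ptr_to_compat()) and fall through to usdpaa_ioctl() for
+ * commands whose layout is identical in both ABIs. */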
7162 +static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd,
7163 + unsigned long arg)
7164 +{
7165 +#ifdef CONFIG_COMPAT
7166 + struct ctx *ctx = fp->private_data;
7167 + void __user *a = (void __user *)arg;
7168 +#endif
7169 + switch (cmd) {
7170 +#ifdef CONFIG_COMPAT
7171 + case USDPAA_IOCTL_DMA_MAP_COMPAT:
7172 + {
7173 + int ret;
7174 + struct usdpaa_ioctl_dma_map_compat input;
7175 + struct usdpaa_ioctl_dma_map converted;
7176 +
7177 + if (copy_from_user(&input, a, sizeof(input)))
7178 + return -EFAULT;
7179 +
7180 + converted.ptr = compat_ptr(input.ptr);
7181 + converted.phys_addr = input.phys_addr;
7182 + converted.len = input.len;
7183 + converted.flags = input.flags;
7184 + strncpy(converted.name, input.name, USDPAA_DMA_NAME_MAX);
7185 + converted.has_locking = input.has_locking;
7186 + converted.did_create = input.did_create;
7187 +
7188 + ret = ioctl_dma_map(fp, ctx, &converted);
7189 + input.ptr = ptr_to_compat(converted.ptr);
7190 + input.phys_addr = converted.phys_addr;
7191 + input.len = converted.len;
7192 + input.flags = converted.flags;
7193 + strncpy(input.name, converted.name, USDPAA_DMA_NAME_MAX);
7194 + input.has_locking = converted.has_locking;
7195 + input.did_create = converted.did_create;
7196 + if (copy_to_user(a, &input, sizeof(input)))
7197 + return -EFAULT;
7198 + return ret;
7199 + }
7200 + case USDPAA_IOCTL_PORTAL_MAP_COMPAT:
7201 + {
7202 + int ret;
7203 + struct compat_usdpaa_ioctl_portal_map input;
7204 + struct usdpaa_ioctl_portal_map converted;
7205 + if (copy_from_user(&input, a, sizeof(input)))
7206 + return -EFAULT;
7207 + converted.type = input.type;
7208 + converted.index = input.index;
7209 + ret = ioctl_portal_map(fp, ctx, &converted);
7210 + input.addr.cinh = ptr_to_compat(converted.addr.cinh);
7211 + input.addr.cena = ptr_to_compat(converted.addr.cena);
7212 + input.channel = converted.channel;
7213 + input.pools = converted.pools;
7214 + input.index = converted.index;
7215 + if (copy_to_user(a, &input, sizeof(input)))
7216 + return -EFAULT;
7217 + return ret;
7218 + }
7219 + case USDPAA_IOCTL_PORTAL_UNMAP_COMPAT:
7220 + {
7221 + struct usdpaa_portal_map_compat input;
7222 + struct usdpaa_portal_map converted;
7223 +
7224 + if (copy_from_user(&input, a, sizeof(input)))
7225 + return -EFAULT;
7226 + converted.cinh = compat_ptr(input.cinh);
7227 + converted.cena = compat_ptr(input.cena);
7228 + return ioctl_portal_unmap(ctx, &converted);
7229 + }
7230 + case USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT:
7231 + {
7232 + int ret;
7233 + struct usdpaa_ioctl_raw_portal converted;
7234 + struct compat_ioctl_raw_portal input;
7235 + if (copy_from_user(&input, a, sizeof(input)))
7236 + return -EFAULT;
7237 + converted.type = input.type;
7238 + converted.index = input.index;
7239 + converted.enable_stash = input.enable_stash;
7240 + converted.cpu = input.cpu;
7241 + converted.cache = input.cache;
7242 + converted.window = input.window;
7243 + converted.sdest = input.sdest;
7244 + ret = ioctl_allocate_raw_portal(fp, ctx, &converted);
7245 +
7246 + input.cinh = converted.cinh;
7247 + input.cena = converted.cena;
7248 + input.index = converted.index;
7249 +
7250 + if (copy_to_user(a, &input, sizeof(input)))
7251 + return -EFAULT;
7252 + return ret;
7253 + }
7254 + case USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT:
7255 + {
7256 + struct usdpaa_ioctl_raw_portal converted;
7257 + struct compat_ioctl_raw_portal input;
7258 + if (copy_from_user(&input, a, sizeof(input)))
7259 + return -EFAULT;
7260 + converted.type = input.type;
7261 + converted.index = input.index;
7262 + converted.cinh = input.cinh;
7263 + converted.cena = input.cena;
7264 + return ioctl_free_raw_portal(fp, ctx, &converted);
7265 + }
7266 +#endif
7267 + default:
7268 + return usdpaa_ioctl(fp, cmd, arg);
7269 + }
7270 + return -EINVAL;
7271 +}
7272 +
7273 +int usdpaa_get_portal_config(struct file *filp, void *cinh,
7274 + enum usdpaa_portal_type ptype, unsigned int *irq,
7275 + void **iir_reg)
7276 +{
7277 + /* Walk the list of portals for filp and return the config
7278 + for the portal that matches the hint */
7279 + struct ctx *context;
7280 + struct portal_mapping *portal;
7281 +
7282 + /* First sanitize the filp */
7283 + if (filp->f_op->open != usdpaa_open)
7284 + return -ENODEV;
7285 + context = filp->private_data;
7286 + spin_lock(&context->lock);
7287 + list_for_each_entry(portal, &context->portals, list) {
7288 + if (portal->user.type == ptype &&
7289 + portal->user.addr.cinh == cinh) {
7290 + if (ptype == usdpaa_portal_qman) {
7291 + *irq = portal->qportal->public_cfg.irq;
7292 + *iir_reg = portal->qportal->addr_virt[1] +
7293 + QM_REG_IIR;
7294 + } else {
7295 + *irq = portal->bportal->public_cfg.irq;
7296 + *iir_reg = portal->bportal->addr_virt[1] +
7297 + BM_REG_IIR;
7298 + }
7299 + spin_unlock(&context->lock);
7300 + return 0;
7301 + }
7302 + }
7303 + spin_unlock(&context->lock);
7304 + return -EINVAL;
7305 +}
7306 +
7307 +static const struct file_operations usdpaa_fops = {
7308 + .open = usdpaa_open,
7309 + .release = usdpaa_release,
7310 + .mmap = usdpaa_mmap,
7311 + .get_unmapped_area = usdpaa_get_unmapped_area,
7312 + .unlocked_ioctl = usdpaa_ioctl,
7313 + .compat_ioctl = usdpaa_ioctl_compat
7314 +};
7315 +
7316 +static struct miscdevice usdpaa_miscdev = {
7317 + .name = "fsl-usdpaa",
7318 + .fops = &usdpaa_fops,
7319 + .minor = MISC_DYNAMIC_MINOR,
7320 +};
7321 +
7322 +/* Early-boot memory allocation. The boot-arg "usdpaa_mem=<x>" is used to
7323 + * indicate how much memory (if any) to allocate during early boot. If the
7324 + * format "usdpaa_mem=<x>,<y>" is used, then <y> will be interpreted as the
7325 + * number of TLB1 entries to reserve (default is 1). If there are more mappings
7326 + * than there are TLB1 entries, fault-handling will occur. */
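+/* For example (illustrative values): "usdpaa_mem=256M,4" requests 256 MB of
+ * early-boot memory backed by 4 TLB1 entries. */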
7327 +
7328 +static __init int usdpaa_mem(char *arg)
7329 +{
7330 + pr_warn("uspdaa_mem argument is depracated\n");
7331 + arg_phys_size = memparse(arg, &arg);
7332 + num_tlb = 1;
7333 + if (*arg == ',') {
7334 + unsigned long ul;
7335 + int err = kstrtoul(arg + 1, 0, &ul);
7336 + if (err < 0) {
7337 + num_tlb = 1;
7338 + pr_warn("ERROR, usdpaa_mem arg is invalid\n");
7339 + } else
7340 + num_tlb = (unsigned int)ul;
7341 + }
7342 + return 0;
7343 +}
7344 +early_param("usdpaa_mem", usdpaa_mem);
7345 +
7346 +static int usdpaa_mem_init(struct reserved_mem *rmem)
7347 +{
7348 + phys_start = rmem->base;
7349 + phys_size = rmem->size;
7350 +
7351 + WARN_ON(!(phys_start && phys_size));
7352 +
7353 + return 0;
7354 +}
7355 +RESERVEDMEM_OF_DECLARE(usdpaa_mem_init, "fsl,usdpaa-mem", usdpaa_mem_init);
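+/* The supported way to reserve USDPAA memory is a device-tree reserved-memory
+ * node with the "fsl,usdpaa-mem" compatible. A minimal sketch (node name,
+ * base and size are illustrative only):
+ *
+ *   reserved-memory {
+ *           usdpaa@10000000 {
+ *                   compatible = "fsl,usdpaa-mem";
+ *                   reg = <0 0x10000000 0 0x10000000>;
+ *           };
+ *   };
+ */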
7356 +
7357 +__init int fsl_usdpaa_init_early(void)
7358 +{
7359 + if (!phys_size || !phys_start) {
7360 + pr_info("No USDPAA memory, no 'fsl,usdpaa-mem' in device-tree\n");
7361 + return 0;
7362 + }
7363 + if (phys_size % PAGE_SIZE) {
7364 + pr_err("'fsl,usdpaa-mem' size must be a multiple of page size\n");
7365 + phys_size = 0;
7366 + return 0;
7367 + }
7368 + if (arg_phys_size && phys_size != arg_phys_size) {
7369 + pr_err("'usdpaa_mem argument size (0x%llx) does not match device tree size (0x%llx)\n",
7370 + arg_phys_size, phys_size);
7371 + phys_size = 0;
7372 + return 0;
7373 + }
7374 + pfn_start = phys_start >> PAGE_SHIFT;
7375 + pfn_size = phys_size >> PAGE_SHIFT;
7376 +#ifdef CONFIG_PPC
7377 + first_tlb = current_tlb = tlbcam_index;
7378 + tlbcam_index += num_tlb;
7379 +#endif
7380 +	pr_info("USDPAA region at %llx:%llx (%lx:%lx), %d TLB1 entries\n",
7381 + phys_start, phys_size, pfn_start, pfn_size, num_tlb);
7382 + return 0;
7383 +}
7384 +subsys_initcall(fsl_usdpaa_init_early);
7385 +
7386 +
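+/* Module init: carve the reserved region into an initial list of free
+ * fragments, each sized to the largest page size that fits the remaining
+ * space (see largest_page_size()), then register the misc device. */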
7387 +static int __init usdpaa_init(void)
7388 +{
7389 + struct mem_fragment *frag;
7390 + int ret;
7391 + u64 tmp_size = phys_size;
7392 + u64 tmp_start = phys_start;
7393 + u64 tmp_pfn_size = pfn_size;
7394 + u64 tmp_pfn_start = pfn_start;
7395 +
7396 + pr_info("Freescale USDPAA process driver\n");
7397 + if (!phys_start) {
7398 + pr_warn("fsl-usdpaa: no region found\n");
7399 + return 0;
7400 + }
7401 +
7402 + while (tmp_size != 0) {
7403 + u32 frag_size = largest_page_size(tmp_size);
7404 + frag = kmalloc(sizeof(*frag), GFP_KERNEL);
7405 + if (!frag) {
7406 + pr_err("Failed to setup USDPAA memory accounting\n");
7407 + return -ENOMEM;
7408 + }
7409 + frag->base = tmp_start;
7410 + frag->len = frag->root_len = frag_size;
7411 + frag->root_pfn = tmp_pfn_start;
7412 + frag->pfn_base = tmp_pfn_start;
7413 + frag->pfn_len = frag_size / PAGE_SIZE;
7414 + frag->refs = 0;
7415 + init_waitqueue_head(&frag->wq);
7416 + frag->owner = NULL;
7417 + list_add(&frag->list, &mem_list);
7418 +
7419 + /* Adjust for this frag */
7420 + tmp_start += frag_size;
7421 + tmp_size -= frag_size;
7422 + tmp_pfn_start += frag_size / PAGE_SIZE;
7423 + tmp_pfn_size -= frag_size / PAGE_SIZE;
7424 + }
7425 + ret = misc_register(&usdpaa_miscdev);
7426 + if (ret)
7427 + pr_err("fsl-usdpaa: failed to register misc device\n");
7428 + return ret;
7429 +}
7430 +
7431 +static void __exit usdpaa_exit(void)
7432 +{
7433 + misc_deregister(&usdpaa_miscdev);
7434 +}
7435 +
7436 +module_init(usdpaa_init);
7437 +module_exit(usdpaa_exit);
7438 +
7439 +MODULE_LICENSE("GPL");
7440 +MODULE_AUTHOR("Freescale Semiconductor");
7441 +MODULE_DESCRIPTION("Freescale USDPAA process driver");
7442 --- /dev/null
7443 +++ b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
7444 @@ -0,0 +1,289 @@
7445 +/* Copyright (c) 2013 Freescale Semiconductor, Inc.
7446 + * All rights reserved.
7447 + *
7448 + * Redistribution and use in source and binary forms, with or without
7449 + * modification, are permitted provided that the following conditions are met:
7450 + * * Redistributions of source code must retain the above copyright
7451 + * notice, this list of conditions and the following disclaimer.
7452 + * * Redistributions in binary form must reproduce the above copyright
7453 + * notice, this list of conditions and the following disclaimer in the
7454 + * documentation and/or other materials provided with the distribution.
7455 + * * Neither the name of Freescale Semiconductor nor the
7456 + * names of its contributors may be used to endorse or promote products
7457 + * derived from this software without specific prior written permission.
7458 + *
7459 + *
7460 + * ALTERNATIVELY, this software may be distributed under the terms of the
7461 + * GNU General Public License ("GPL") as published by the Free Software
7462 + * Foundation, either version 2 of that License or (at your option) any
7463 + * later version.
7464 + *
7465 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7466 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7467 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7468 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7469 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7470 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7471 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7472 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7473 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7474 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7475 + */
7476 +
7477 +/* Define a device that allows USDPAA processes to open a file
7478 +   descriptor and specify which IRQ they want to monitor using an ioctl().
7479 +   When an IRQ is received, the device becomes readable so that a process
7480 +   can use read() or select() type calls to monitor for IRQs. */
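+/*
+ * Illustrative user-space sequence (paths and values are examples only):
+ *
+ *   int ifd = open("/dev/fsl-usdpaa-irq", O_RDONLY);
+ *   struct usdpaa_ioctl_irq_map im;
+ *   im.type = usdpaa_portal_qman;
+ *   im.fd = usdpaa_fd;            (fd of an open /dev/fsl-usdpaa)
+ *   im.portal_cinh = cinh;        (CI address returned by PORTAL_MAP)
+ *   ioctl(ifd, USDPAA_IOCTL_PORTAL_IRQ_MAP, &im);
+ *   uint32_t count;
+ *   read(ifd, &count, sizeof(count));   (blocks until an IRQ arrives,
+ *                                        unless O_NONBLOCK is set)
+ */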
7481 +
7482 +#include <linux/miscdevice.h>
7483 +#include <linux/fs.h>
7484 +#include <linux/cdev.h>
7485 +#include <linux/slab.h>
7486 +#include <linux/interrupt.h>
7487 +#include <linux/poll.h>
7488 +#include <linux/uaccess.h>
7489 +#include <linux/fsl_usdpaa.h>
7490 +#include <linux/module.h>
7491 +#include <linux/fdtable.h>
7492 +#include <linux/file.h>
7493 +
7494 +#include "qman_low.h"
7495 +#include "bman_low.h"
7496 +
7497 +struct usdpaa_irq_ctx {
7498 + int irq_set; /* Set to true once the irq is set via ioctl */
7499 + unsigned int irq_num;
7500 + u32 last_irq_count; /* Last value returned from read */
7501 + u32 irq_count; /* Number of irqs since last read */
7502 + wait_queue_head_t wait_queue; /* Waiting processes */
7503 + spinlock_t lock;
7504 + void *inhibit_addr; /* inhibit register address */
7505 + struct file *usdpaa_filp;
7506 + char irq_name[128];
7507 +};
7508 +
7509 +static int usdpaa_irq_open(struct inode *inode, struct file *filp)
7510 +{
7511 + struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
7512 + if (!ctx)
7513 + return -ENOMEM;
7514 + ctx->irq_set = 0;
7515 + ctx->irq_count = 0;
7516 + ctx->last_irq_count = 0;
7517 + init_waitqueue_head(&ctx->wait_queue);
7518 + spin_lock_init(&ctx->lock);
7519 + filp->private_data = ctx;
7520 + return 0;
7521 +}
7522 +
7523 +static int usdpaa_irq_release(struct inode *inode, struct file *filp)
7524 +{
7525 + struct usdpaa_irq_ctx *ctx = filp->private_data;
7526 + if (ctx->irq_set) {
7527 + /* Inhibit the IRQ */
7528 + out_be32(ctx->inhibit_addr, 0x1);
7529 + irq_set_affinity_hint(ctx->irq_num, NULL);
7530 + free_irq(ctx->irq_num, ctx);
7531 + ctx->irq_set = 0;
7532 + fput(ctx->usdpaa_filp);
7533 + }
7534 + kfree(filp->private_data);
7535 + return 0;
7536 +}
7537 +
7538 +static irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
7539 +{
7540 + unsigned long flags;
7541 + struct usdpaa_irq_ctx *ctx = _ctx;
7542 + spin_lock_irqsave(&ctx->lock, flags);
7543 + ++ctx->irq_count;
7544 + spin_unlock_irqrestore(&ctx->lock, flags);
7545 + wake_up_all(&ctx->wait_queue);
7546 + /* Set the inhibit register. This will be reenabled
7547 + once the USDPAA code handles the IRQ */
7548 + out_be32(ctx->inhibit_addr, 0x1);
7549 + pr_info("Inhibit at %p count %d", ctx->inhibit_addr, ctx->irq_count);
7550 + return IRQ_HANDLED;
7551 +}
7552 +
7553 +static int map_irq(struct file *fp, struct usdpaa_ioctl_irq_map *irq_map)
7554 +{
7555 + struct usdpaa_irq_ctx *ctx = fp->private_data;
7556 + int ret;
7557 +
7558 + if (ctx->irq_set) {
7559 + pr_debug("Setting USDPAA IRQ when it was already set!\n");
7560 + return -EBUSY;
7561 + }
7562 +
7563 + ctx->usdpaa_filp = fget(irq_map->fd);
7564 + if (!ctx->usdpaa_filp) {
7565 + pr_debug("USDPAA fget(%d) returned NULL\n", irq_map->fd);
7566 + return -EINVAL;
7567 + }
7568 +
7569 + ret = usdpaa_get_portal_config(ctx->usdpaa_filp, irq_map->portal_cinh,
7570 + irq_map->type, &ctx->irq_num,
7571 + &ctx->inhibit_addr);
7572 + if (ret) {
7573 + pr_debug("USDPAA IRQ couldn't identify portal\n");
7574 + fput(ctx->usdpaa_filp);
7575 + return ret;
7576 + }
7577 +
7578 + ctx->irq_set = 1;
7579 +
7580 + snprintf(ctx->irq_name, sizeof(ctx->irq_name),
7581 + "usdpaa_irq %d", ctx->irq_num);
7582 +
7583 + ret = request_irq(ctx->irq_num, usdpaa_irq_handler, 0,
7584 + ctx->irq_name, ctx);
7585 + if (ret) {
7586 + pr_err("USDPAA request_irq(%d) failed, ret= %d\n",
7587 + ctx->irq_num, ret);
7588 + ctx->irq_set = 0;
7589 + fput(ctx->usdpaa_filp);
7590 + return ret;
7591 + }
7592 + ret = irq_set_affinity(ctx->irq_num, &current->cpus_allowed);
7593 + if (ret)
7594 + pr_err("USDPAA irq_set_affinity() failed, ret= %d\n", ret);
7595 +
7596 + ret = irq_set_affinity_hint(ctx->irq_num, &current->cpus_allowed);
7597 + if (ret)
7598 + pr_err("USDPAA irq_set_affinity_hint() failed, ret= %d\n", ret);
7599 +
7600 + return 0;
7601 +}
7602 +
7603 +static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
7604 + unsigned long arg)
7605 +{
7606 + int ret;
7607 + struct usdpaa_ioctl_irq_map irq_map;
7608 +
7609 + if (cmd != USDPAA_IOCTL_PORTAL_IRQ_MAP) {
7610 + pr_debug("USDPAA IRQ unknown command 0x%x\n", cmd);
7611 + return -EINVAL;
7612 + }
7613 +
7614 + ret = copy_from_user(&irq_map, (void __user *)arg,
7615 + sizeof(irq_map));
7616 + if (ret)
7617 + return ret;
7618 + return map_irq(fp, &irq_map);
7619 +}
7620 +
7621 +static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
7622 + size_t count, loff_t *offp)
7623 +{
7624 + struct usdpaa_irq_ctx *ctx = filp->private_data;
7625 + int ret;
7626 +
7627 + if (!ctx->irq_set) {
7628 + pr_debug("Reading USDPAA IRQ before it was set\n");
7629 + return -EINVAL;
7630 + }
7631 +
7632 + if (count < sizeof(ctx->irq_count)) {
7633 + pr_debug("USDPAA IRQ Read too small\n");
7634 + return -EINVAL;
7635 + }
7636 + if (ctx->irq_count == ctx->last_irq_count) {
7637 + if (filp->f_flags & O_NONBLOCK)
7638 + return -EAGAIN;
7639 +
7640 + ret = wait_event_interruptible(ctx->wait_queue,
7641 + ctx->irq_count != ctx->last_irq_count);
7642 + if (ret == -ERESTARTSYS)
7643 + return ret;
7644 + }
7645 +
7646 + ctx->last_irq_count = ctx->irq_count;
7647 +
7648 + if (copy_to_user(buff, &ctx->last_irq_count,
7649 + sizeof(ctx->last_irq_count)))
7650 + return -EFAULT;
7651 + return sizeof(ctx->irq_count);
7652 +}
7653 +
7654 +static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
7655 +{
7656 + struct usdpaa_irq_ctx *ctx = filp->private_data;
7657 + unsigned int ret = 0;
7658 + unsigned long flags;
7659 +
7660 + if (!ctx->irq_set)
7661 + return POLLHUP;
7662 +
7663 + poll_wait(filp, &ctx->wait_queue, wait);
7664 +
7665 + spin_lock_irqsave(&ctx->lock, flags);
7666 + if (ctx->irq_count != ctx->last_irq_count)
7667 + ret |= POLLIN | POLLRDNORM;
7668 + spin_unlock_irqrestore(&ctx->lock, flags);
7669 + return ret;
7670 +}
7671 +
7672 +static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
7673 + unsigned long arg)
7674 +{
7675 +#ifdef CONFIG_COMPAT
7676 + void __user *a = (void __user *)arg;
7677 +#endif
7678 + switch (cmd) {
7679 +#ifdef CONFIG_COMPAT
7680 + case USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT:
7681 + {
7682 + struct compat_ioctl_irq_map input;
7683 + struct usdpaa_ioctl_irq_map converted;
7684 + if (copy_from_user(&input, a, sizeof(input)))
7685 + return -EFAULT;
7686 + converted.type = input.type;
7687 + converted.fd = input.fd;
7688 + converted.portal_cinh = compat_ptr(input.portal_cinh);
7689 + return map_irq(fp, &converted);
7690 + }
7691 +#endif
7692 + default:
7693 + return usdpaa_irq_ioctl(fp, cmd, arg);
7694 + }
7695 +}
7696 +
7697 +static const struct file_operations usdpaa_irq_fops = {
7698 + .open = usdpaa_irq_open,
7699 + .release = usdpaa_irq_release,
7700 + .unlocked_ioctl = usdpaa_irq_ioctl,
7701 + .compat_ioctl = usdpaa_irq_ioctl_compat,
7702 + .read = usdpaa_irq_read,
7703 + .poll = usdpaa_irq_poll
7704 +};
7705 +
7706 +static struct miscdevice usdpaa_miscdev = {
7707 + .name = "fsl-usdpaa-irq",
7708 + .fops = &usdpaa_irq_fops,
7709 + .minor = MISC_DYNAMIC_MINOR,
7710 +};
7711 +
7712 +static int __init usdpaa_irq_init(void)
7713 +{
7714 + int ret;
7715 +
7716 + pr_info("Freescale USDPAA process IRQ driver\n");
7717 + ret = misc_register(&usdpaa_miscdev);
7718 + if (ret)
7719 + pr_err("fsl-usdpaa-irq: failed to register misc device\n");
7720 + return ret;
7721 +}
7722 +
7723 +static void __exit usdpaa_irq_exit(void)
7724 +{
7725 + misc_deregister(&usdpaa_miscdev);
7726 +}
7727 +
7728 +module_init(usdpaa_irq_init);
7729 +module_exit(usdpaa_irq_exit);
7730 +
7731 +MODULE_LICENSE("GPL");
7732 +MODULE_AUTHOR("Freescale Semiconductor");
7733 +MODULE_DESCRIPTION("Freescale USDPAA process IRQ driver");
7734 --- /dev/null
7735 +++ b/drivers/staging/fsl_qbman/qbman_driver.c
7736 @@ -0,0 +1,88 @@
7737 +/* Copyright 2013 Freescale Semiconductor, Inc.
7738 + *
7739 + * Redistribution and use in source and binary forms, with or without
7740 + * modification, are permitted provided that the following conditions are met:
7741 + * * Redistributions of source code must retain the above copyright
7742 + * notice, this list of conditions and the following disclaimer.
7743 + * * Redistributions in binary form must reproduce the above copyright
7744 + * notice, this list of conditions and the following disclaimer in the
7745 + * documentation and/or other materials provided with the distribution.
7746 + * * Neither the name of Freescale Semiconductor nor the
7747 + * names of its contributors may be used to endorse or promote products
7748 + * derived from this software without specific prior written permission.
7749 + *
7750 + *
7751 + * ALTERNATIVELY, this software may be distributed under the terms of the
7752 + * GNU General Public License ("GPL") as published by the Free Software
7753 + * Foundation, either version 2 of that License or (at your option) any
7754 + * later version.
7755 + *
7756 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7757 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7758 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7759 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7760 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7761 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7762 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7763 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7764 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7765 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7766 + */
7767 +
7768 +#include <linux/time.h>
7769 +#include "qman_private.h"
7770 +#include "bman_private.h"
7771 +__init void qman_init_early(void);
7772 +__init void bman_init_early(void);
7773 +
7774 +static __init int qbman_init(void)
7775 +{
7776 + struct device_node *dn;
7777 + u32 is_portal_available;
7778 +
7779 + bman_init();
7780 + qman_init();
7781 +
7782 + is_portal_available = 0;
7783 + for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
7784 + if (!of_device_is_available(dn))
7785 + continue;
7786 + else
7787 + is_portal_available = 1;
7788 + }
7789 +
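+	/* Without CCSR access (e.g. when QMan is owned by a control-plane
+	 * partition) but with portals available, poll FQID 1 until a query
+	 * succeeds, i.e. until the control-plane has configured QMan. */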
7790 + if (!qman_have_ccsr() && is_portal_available) {
7791 + struct qman_fq fq = {
7792 + .fqid = 1
7793 + };
7794 + struct qm_mcr_queryfq_np np;
7795 + int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
7796 + struct timespec nowts, diffts, startts = current_kernel_time();
7797 +		/* Loop until querying the given FQID succeeds or we time out */
7798 + while (1) {
7799 + err = qman_query_fq_np(&fq, &np);
7800 + if (!err) {
7801 + /* success, control-plane has configured QMan */
7802 + break;
7803 + } else if (err != -ERANGE) {
7804 + pr_err("QMan: I/O error, continuing anyway\n");
7805 + break;
7806 + }
7807 + nowts = current_kernel_time();
7808 + diffts = timespec_sub(nowts, startts);
7809 + if (diffts.tv_sec > 0) {
7810 + if (!retry--) {
7811 + pr_err("QMan: time out, control-plane"
7812 + " dead?\n");
7813 + break;
7814 + }
7815 + pr_warn("QMan: polling for the control-plane"
7816 + " (%d)\n", retry);
7817 + }
7818 + }
7819 + }
7820 + bman_resource_init();
7821 + qman_resource_init();
7822 + return 0;
7823 +}
7824 +subsys_initcall(qbman_init);
7825 --- /dev/null
7826 +++ b/drivers/staging/fsl_qbman/qman_config.c
7827 @@ -0,0 +1,1224 @@
7828 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
7829 + *
7830 + * Redistribution and use in source and binary forms, with or without
7831 + * modification, are permitted provided that the following conditions are met:
7832 + * * Redistributions of source code must retain the above copyright
7833 + * notice, this list of conditions and the following disclaimer.
7834 + * * Redistributions in binary form must reproduce the above copyright
7835 + * notice, this list of conditions and the following disclaimer in the
7836 + * documentation and/or other materials provided with the distribution.
7837 + * * Neither the name of Freescale Semiconductor nor the
7838 + * names of its contributors may be used to endorse or promote products
7839 + * derived from this software without specific prior written permission.
7840 + *
7841 + *
7842 + * ALTERNATIVELY, this software may be distributed under the terms of the
7843 + * GNU General Public License ("GPL") as published by the Free Software
7844 + * Foundation, either version 2 of that License or (at your option) any
7845 + * later version.
7846 + *
7847 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7848 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7849 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7850 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7851 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7852 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7853 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7854 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7855 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7856 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7857 + */
7858 +
7859 +#include <asm/cacheflush.h>
7860 +#include "qman_private.h"
7861 +#include <linux/highmem.h>
7862 +#include <linux/of_reserved_mem.h>
7863 +
7864 +/* Last updated for v00.800 of the BG */
7865 +
7866 +/* Register offsets */
7867 +#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
7868 +#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
7869 +#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
7870 +#define REG_DD_CFG 0x0200
7871 +#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
7872 +#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
7873 +#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
7874 +#define REG_PFDR_FPC 0x0400
7875 +#define REG_PFDR_FP_HEAD 0x0404
7876 +#define REG_PFDR_FP_TAIL 0x0408
7877 +#define REG_PFDR_FP_LWIT 0x0410
7878 +#define REG_PFDR_CFG 0x0414
7879 +#define REG_SFDR_CFG 0x0500
7880 +#define REG_SFDR_IN_USE 0x0504
7881 +#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
7882 +#define REG_WQ_DEF_ENC_WQID 0x0630
7883 +#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
7884 +#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
7885 +#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
7886 +#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
7887 +#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
7888 +#define REG_CM_CFG 0x0800
7889 +#define REG_ECSR 0x0a00
7890 +#define REG_ECIR 0x0a04
7891 +#define REG_EADR 0x0a08
7892 +#define REG_ECIR2 0x0a0c
7893 +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
7894 +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
7895 +#define REG_MCR 0x0b00
7896 +#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
7897 +#define REG_MISC_CFG 0x0be0
7898 +#define REG_HID_CFG 0x0bf0
7899 +#define REG_IDLE_STAT 0x0bf4
7900 +#define REG_IP_REV_1 0x0bf8
7901 +#define REG_IP_REV_2 0x0bfc
7902 +#define REG_FQD_BARE 0x0c00
7903 +#define REG_PFDR_BARE 0x0c20
7904 +#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
7905 +#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
7906 +#define REG_QCSP_BARE 0x0c80
7907 +#define REG_QCSP_BAR 0x0c84
7908 +#define REG_CI_SCHED_CFG 0x0d00
7909 +#define REG_SRCIDR 0x0d04
7910 +#define REG_LIODNR 0x0d08
7911 +#define REG_CI_RLM_AVG 0x0d14
7912 +#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */
7913 +#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
7914 +#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
7915 +#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
7916 +#define REG_CEETM_CFG_IDX 0x900
7917 +#define REG_CEETM_CFG_PRES 0x904
7918 +#define REG_CEETM_XSFDR_IN_USE 0x908
7919 +
7920 +/* Assists for QMAN_MCR */
7921 +#define MCR_INIT_PFDR 0x01000000
7922 +#define MCR_get_rslt(v) (u8)((v) >> 24)
7923 +#define MCR_rslt_idle(r) (!rslt || (rslt >= 0xf0))
7924 +#define MCR_rslt_ok(r) (rslt == 0xf0)
7925 +#define MCR_rslt_eaccess(r) (rslt == 0xf8)
7926 +#define MCR_rslt_inval(r) (rslt == 0xff)
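+/* Note: the MCR_rslt_*() helpers ignore their argument and expect a local
+ * variable named 'rslt' (typically MCR_get_rslt(qm_in(MCR))) in the caller. */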
7927 +
7928 +struct qman;
7929 +
7930 +/* Follows WQ_CS_CFG0-5 */
7931 +enum qm_wq_class {
7932 + qm_wq_portal = 0,
7933 + qm_wq_pool = 1,
7934 + qm_wq_fman0 = 2,
7935 + qm_wq_fman1 = 3,
7936 + qm_wq_caam = 4,
7937 + qm_wq_pme = 5,
7938 + qm_wq_first = qm_wq_portal,
7939 + qm_wq_last = qm_wq_pme
7940 +};
7941 +
7942 +/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
7943 +enum qm_memory {
7944 + qm_memory_fqd,
7945 + qm_memory_pfdr
7946 +};
7947 +
7948 +/* Used by all error interrupt registers except 'inhibit' */
7949 +#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
7950 +#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
7951 +#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
7952 +#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
7953 +#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
7954 +#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
7955 +#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
7956 +#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
7957 +#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
7958 +#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
7959 +#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
7960 +#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
7961 +#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
7962 +#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
7963 +#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
7964 +#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
7965 +#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
7966 +#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
7967 +
7968 +/* QMAN_ECIR valid error bit */
7969 +#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
7970 + QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
7971 + QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
7972 +#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
7973 + QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
7974 + QM_EIRQ_IFSI)
7975 +
7976 +union qman_ecir {
7977 + u32 ecir_raw;
7978 + struct {
7979 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
7980 + u32 __reserved:2;
7981 + u32 portal_type:1;
7982 + u32 portal_num:5;
7983 + u32 fqid:24;
7984 +#else
7985 + u32 fqid:24;
7986 + u32 portal_num:5;
7987 + u32 portal_type:1;
7988 + u32 __reserved:2;
7989 +#endif
7990 + } __packed info;
7991 +};
7992 +
7993 +union qman_ecir2 {
7994 + u32 ecir2_raw;
7995 + struct {
7996 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
7997 + u32 portal_type:1;
7998 + u32 __reserved:21;
7999 + u32 portal_num:10;
8000 +#else
8001 + u32 portal_num:10;
8002 + u32 __reserved:21;
8003 + u32 portal_type:1;
8004 +#endif
8005 + } __packed info;
8006 +};
8007 +
8008 +union qman_eadr {
8009 + u32 eadr_raw;
8010 + struct {
8011 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
8012 + u32 __reserved1:4;
8013 + u32 memid:4;
8014 + u32 __reserved2:12;
8015 + u32 eadr:12;
8016 +#else
8017 + u32 eadr:12;
8018 + u32 __reserved2:12;
8019 + u32 memid:4;
8020 + u32 __reserved1:4;
8021 +#endif
8022 + } __packed info;
8023 + struct {
8024 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
8025 + u32 __reserved1:3;
8026 + u32 memid:5;
8027 + u32 __reserved:8;
8028 + u32 eadr:16;
8029 +#else
8030 + u32 eadr:16;
8031 + u32 __reserved:8;
8032 + u32 memid:5;
8033 + u32 __reserved1:3;
8034 +#endif
8035 + } __packed info_rev3;
8036 +};
8037 +
8038 +struct qman_hwerr_txt {
8039 + u32 mask;
8040 + const char *txt;
8041 +};
8042 +
8043 +#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b }
8044 +
8045 +static const struct qman_hwerr_txt qman_hwerr_txts[] = {
8046 + QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"),
8047 + QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"),
8048 + QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"),
8049 + QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"),
8050 + QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
8051 + QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
8052 + QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"),
8053 + QMAN_HWE_TXT(ICVI, "Invalid Command Verb"),
8054 + QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"),
8055 + QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"),
8056 + QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"),
8057 + QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"),
8058 + QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"),
8059 + QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"),
8060 + QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"),
8061 + QMAN_HWE_TXT(IESI, "Invalid Enqueue State"),
8062 + QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"),
8063 + QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue")
8064 +};
8065 +#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt))
8066 +
8067 +struct qman_error_info_mdata {
8068 + u16 addr_mask;
8069 + u16 bits;
8070 + const char *txt;
8071 +};
8072 +
8073 +#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
8074 +static const struct qman_error_info_mdata error_mdata[] = {
8075 + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"),
8076 + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"),
8077 + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"),
8078 + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"),
8079 + QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"),
8080 + QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"),
8081 + QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"),
8082 + QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"),
8083 + QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"),
8084 + QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"),
8085 + QMAN_ERR_MDATA(0x07FF, 181, "CEETM class queue descriptor memory"),
8086 + QMAN_ERR_MDATA(0x0FFF, 140, "CEETM extended SFDR memory"),
8087 + QMAN_ERR_MDATA(0x0FFF, 25, "CEETM logical FQ mapping memory"),
8088 + QMAN_ERR_MDATA(0x0FFF, 96, "CEETM dequeue context memory"),
8089 + QMAN_ERR_MDATA(0x07FF, 396, "CEETM ccgr memory"),
8090 + QMAN_ERR_MDATA(0x00FF, 146, "CEETM CQ channel shaping memory"),
8091 + QMAN_ERR_MDATA(0x007F, 256, "CEETM CQ channel scheduling memory"),
8092 + QMAN_ERR_MDATA(0x01FF, 88, "CEETM dequeue statistics memory"),
8093 +};
8094 +#define QMAN_ERR_MDATA_COUNT \
8095 + (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata))
8096 +
8097 +/* TODO: make this configurable via Kconfig */
8098 +#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
8099 +
8100 +/**
8101 + * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
8102 + * @v: for accessors that write values, this is the 32-bit value
8103 + *
8104 + * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All
8105 + * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of
8106 + * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means
8107 + * "write the enable register" rather than "enable the write register"!
8108 + */
8109 +#define qm_err_isr_status_read(qm) \
8110 + __qm_err_isr_read(qm, qm_isr_status)
8111 +#define qm_err_isr_status_clear(qm, m) \
8112 + __qm_err_isr_write(qm, qm_isr_status, m)
8113 +#define qm_err_isr_enable_read(qm) \
8114 + __qm_err_isr_read(qm, qm_isr_enable)
8115 +#define qm_err_isr_enable_write(qm, v) \
8116 + __qm_err_isr_write(qm, qm_isr_enable, v)
8117 +#define qm_err_isr_disable_read(qm) \
8118 + __qm_err_isr_read(qm, qm_isr_disable)
8119 +#define qm_err_isr_disable_write(qm, v) \
8120 + __qm_err_isr_write(qm, qm_isr_disable, v)
8121 +#define qm_err_isr_inhibit(qm) \
8122 + __qm_err_isr_write(qm, qm_isr_inhibit, 1)
8123 +#define qm_err_isr_uninhibit(qm) \
8124 + __qm_err_isr_write(qm, qm_isr_inhibit, 0)
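+/*
+ * Illustrative sketch (not part of the original driver): these accessors are
+ * typically combined as below, mirroring what qman_isr() further down does
+ * when it un-enables a noisy error source; 'qm' is the CCSR mapping returned
+ * by qm_create().
+ *
+ *	u32 ier = qm_err_isr_enable_read(qm);
+ *	ier &= ~QM_EIRQ_PLWI;			   // stop reporting PFDR low watermark
+ *	qm_err_isr_enable_write(qm, ier);	   // "write the enable register"
+ *	qm_err_isr_status_clear(qm, QM_EIRQ_PLWI); // w1c any latched status bit
+ */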
8125 +
8126 +/*
8127 + * TODO: unimplemented registers
8128 + *
8129 + * Keeping a list here of Qman registers I have not yet covered:
8130 + * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
8131 + * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
8132 + * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
8133 + */
8134 +
8135 +/* Encapsulate "struct qman *" as a cast of the register space address. */
8136 +
8137 +static struct qman *qm_create(void *regs)
8138 +{
8139 + return (struct qman *)regs;
8140 +}
8141 +
8142 +static inline u32 __qm_in(struct qman *qm, u32 offset)
8143 +{
8144 + return in_be32((void *)qm + offset);
8145 +}
8146 +static inline void __qm_out(struct qman *qm, u32 offset, u32 val)
8147 +{
8148 + out_be32((void *)qm + offset, val);
8149 +}
8150 +#define qm_in(reg) __qm_in(qm, REG_##reg)
8151 +#define qm_out(reg, val) __qm_out(qm, REG_##reg, val)
8152 +
8153 +static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n)
8154 +{
8155 + return __qm_in(qm, REG_ERR_ISR + (n << 2));
8156 +}
8157 +
8158 +static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val)
8159 +{
8160 + __qm_out(qm, REG_ERR_ISR + (n << 2), val);
8161 +}
8162 +
8163 +static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal,
8164 + int ed, u8 sernd)
8165 +{
8166 + DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) ||
8167 + (portal == qm_dc_portal_fman1));
8168 + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
8169 + qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff));
8170 + else
8171 + qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f));
8172 +}
8173 +
8174 +static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class,
8175 + u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5,
8176 + u8 csw6, u8 csw7)
8177 +{
8178 + qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
8179 + ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
8180 + ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
8181 + ((csw6 & 0x7) << 4) | (csw7 & 0x7));
8182 +}
8183 +
8184 +static void qm_set_hid(struct qman *qm)
8185 +{
8186 + qm_out(HID_CFG, 0);
8187 +}
8188 +
8189 +static void qm_set_corenet_initiator(struct qman *qm)
8190 +{
8191 + qm_out(CI_SCHED_CFG,
8192 + 0x80000000 | /* write srcciv enable */
8193 + (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) |
8194 + (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) |
8195 + (CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W << 4) |
8196 + CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W);
8197 +}
8198 +
8199 +static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor,
8200 + u8 *cfg)
8201 +{
8202 + u32 v = qm_in(IP_REV_1);
8203 + u32 v2 = qm_in(IP_REV_2);
8204 + *id = (v >> 16);
8205 + *major = (v >> 8) & 0xff;
8206 + *minor = v & 0xff;
8207 + *cfg = v2 & 0xff;
8208 +}
8209 +
8210 +static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
8211 + int enable, int prio, int stash, u32 size)
8212 +{
8213 + u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
8214 + u32 exp = ilog2(size);
8215 + /* choke if size isn't within range */
8216 + DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
8217 + is_power_of_2(size));
8218 + /* choke if 'ba' has lower-alignment than 'size' */
8219 + DPA_ASSERT(!(ba & (size - 1)));
8220 + __qm_out(qm, offset, upper_32_bits(ba));
8221 + __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
8222 + __qm_out(qm, offset + REG_offset_AR,
8223 + (enable ? 0x80000000 : 0) |
8224 + (prio ? 0x40000000 : 0) |
8225 + (stash ? 0x20000000 : 0) |
8226 + (exp - 1));
8227 +}
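+/*
+ * Worked example (illustrative only, values assumed): for a hypothetical 2 MB
+ * FQD region with enable=1, prio=0 and stash=0, qm_set_memory() above would
+ * program the AR register as
+ *
+ *	exp = ilog2(0x200000) = 21
+ *	AR  = 0x80000000 | (exp - 1) = 0x80000014
+ *
+ * i.e. bit 31 enables the region and the low bits encode log2(size) - 1.
+ */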
8228 +
8229 +static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
8230 +{
8231 + qm_out(PFDR_FP_LWIT, th & 0xffffff);
8232 + qm_out(PFDR_CFG, k);
8233 +}
8234 +
8235 +static void qm_set_sfdr_threshold(struct qman *qm, u16 th)
8236 +{
8237 + qm_out(SFDR_CFG, th & 0x3ff);
8238 +}
8239 +
8240 +static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num)
8241 +{
8242 + u8 rslt = MCR_get_rslt(qm_in(MCR));
8243 +
8244 + DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
8245 + /* Make sure the command interface is 'idle' */
8246 + if (!MCR_rslt_idle(rslt))
8247 + panic("QMAN_MCR isn't idle");
8248 +
8249 + /* Write the MCR command params then the verb */
8250 + qm_out(MCP(0), pfdr_start);
8251 + /* TODO: remove this - it's a workaround for a model bug that is
8252 + * corrected in more recent versions. We use the workaround until
8253 + * everyone has upgraded. */
8254 + qm_out(MCP(1), (pfdr_start + num - 16));
8255 + lwsync();
8256 + qm_out(MCR, MCR_INIT_PFDR);
8257 + /* Poll for the result */
8258 + do {
8259 + rslt = MCR_get_rslt(qm_in(MCR));
8260 + } while (!MCR_rslt_idle(rslt));
8261 + if (MCR_rslt_ok(rslt))
8262 + return 0;
8263 + if (MCR_rslt_eaccess(rslt))
8264 + return -EACCES;
8265 + if (MCR_rslt_inval(rslt))
8266 + return -EINVAL;
8267 + pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
8268 + return -ENOSYS;
8269 +}
8270 +
8271 +/*****************/
8272 +/* Config driver */
8273 +/*****************/
8274 +
8275 +#define DEFAULT_FQD_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ)
8276 +#define DEFAULT_PFDR_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_PFDR_SZ)
8277 +
8278 +/* We support only one of these */
8279 +static struct qman *qm;
8280 +static struct device_node *qm_node;
8281 +
8282 +/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
8283 + * during qman_init_ccsr(). */
8284 +static dma_addr_t fqd_a, pfdr_a;
8285 +static size_t fqd_sz = DEFAULT_FQD_SZ, pfdr_sz = DEFAULT_PFDR_SZ;
8286 +
8287 +static int qman_fqd(struct reserved_mem *rmem)
8288 +{
8289 + fqd_a = rmem->base;
8290 + fqd_sz = rmem->size;
8291 +
8292 + WARN_ON(!(fqd_a && fqd_sz));
8293 +
8294 + return 0;
8295 +}
8296 +RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
8297 +
8298 +static int qman_pfdr(struct reserved_mem *rmem)
8299 +{
8300 + pfdr_a = rmem->base;
8301 + pfdr_sz = rmem->size;
8302 +
8303 + WARN_ON(!(pfdr_a && pfdr_sz));
8304 +
8305 + return 0;
8306 +}
8307 +RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
8308 +
8309 +size_t get_qman_fqd_size(void)
8310 +{
8311 + return fqd_sz;
8312 +}
8313 +
8314 +/* The memory region itself is reserved via the RESERVEDMEM_OF_DECLARE()
8315 + * callbacks above; here we optionally zero it and flush it from the data
8316 + * cache so that QMAN-originated transactions for this memory region can be
8317 + * treated as non-coherent.
8318 + */
8319 +static __init int parse_mem_property(struct device_node *node, const char *name,
8320 + dma_addr_t *addr, size_t *sz, int zero)
8321 +{
8322 + int ret;
8323 +
8324 +	/* If the node has a "zero-pma" property, don't zero it even if asked */
8325 + if (zero && of_find_property(node, "zero-pma", &ret)) {
8326 + pr_info(" it's a 'zero-pma', not zeroing from s/w\n");
8327 + zero = 0;
8328 + }
8329 +
8330 + if (zero) {
8331 + /* map as cacheable, non-guarded */
8332 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
8333 + void __iomem *tmpp = ioremap_cache(*addr, *sz);
8334 +#else
8335 + void __iomem *tmpp = ioremap(*addr, *sz);
8336 +#endif
8337 +
8338 + if (!tmpp)
8339 + return -ENOMEM;
8340 + memset_io(tmpp, 0, *sz);
8341 + flush_dcache_range((unsigned long)tmpp,
8342 + (unsigned long)tmpp + *sz);
8343 + iounmap(tmpp);
8344 + }
8345 +
8346 + return 0;
8347 +}
8348 +
8349 +/* TODO:
8350 + * - there is obviously no handling of errors,
8351 + * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
8352 + * both memory resources to zero.
8353 + */
8354 +static int __init fsl_qman_init(struct device_node *node)
8355 +{
8356 + struct resource res;
8357 + resource_size_t len;
8358 + u32 __iomem *regs;
8359 + const char *s;
8360 + int ret, standby = 0;
8361 + u16 id;
8362 + u8 major, minor, cfg;
8363 + ret = of_address_to_resource(node, 0, &res);
8364 + if (ret) {
8365 + pr_err("Can't get %s property '%s'\n", node->full_name, "reg");
8366 + return ret;
8367 + }
8368 + s = of_get_property(node, "fsl,hv-claimable", &ret);
8369 + if (s && !strcmp(s, "standby"))
8370 + standby = 1;
8371 + if (!standby) {
8372 + ret = parse_mem_property(node, "fsl,qman-fqd",
8373 + &fqd_a, &fqd_sz, 1);
8374 + pr_info("qman-fqd addr %pad size 0x%zx\n", &fqd_a, fqd_sz);
8375 + BUG_ON(ret);
8376 + ret = parse_mem_property(node, "fsl,qman-pfdr",
8377 + &pfdr_a, &pfdr_sz, 0);
8378 + pr_info("qman-pfdr addr %pad size 0x%zx\n", &pfdr_a, pfdr_sz);
8379 + BUG_ON(ret);
8380 + }
8381 + /* Global configuration */
8382 + len = resource_size(&res);
8383 + if (len != (unsigned long)len)
8384 + return -EINVAL;
8385 + regs = ioremap(res.start, (unsigned long)len);
8386 + qm = qm_create(regs);
8387 + qm_node = node;
8388 + qm_get_version(qm, &id, &major, &minor, &cfg);
8389 + pr_info("Qman ver:%04x,%02x,%02x,%02x\n", id, major, minor, cfg);
8390 + if (!qman_ip_rev) {
8391 + if ((major == 1) && (minor == 0)) {
8392 + pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
8393 + iounmap(regs);
8394 + return -ENODEV;
8395 + } else if ((major == 1) && (minor == 1))
8396 + qman_ip_rev = QMAN_REV11;
8397 + else if ((major == 1) && (minor == 2))
8398 + qman_ip_rev = QMAN_REV12;
8399 + else if ((major == 2) && (minor == 0))
8400 + qman_ip_rev = QMAN_REV20;
8401 + else if ((major == 3) && (minor == 0))
8402 + qman_ip_rev = QMAN_REV30;
8403 + else if ((major == 3) && (minor == 1))
8404 + qman_ip_rev = QMAN_REV31;
8405 + else if ((major == 3) && (minor == 2))
8406 + qman_ip_rev = QMAN_REV32;
8407 + else {
8408 + pr_warn("unknown Qman version, default to rev1.1\n");
8409 + qman_ip_rev = QMAN_REV11;
8410 + }
8411 + qman_ip_cfg = cfg;
8412 + }
8413 +
8414 + if (standby) {
8415 + pr_info(" -> in standby mode\n");
8416 + return 0;
8417 + }
8418 + return 0;
8419 +}
8420 +
8421 +int qman_have_ccsr(void)
8422 +{
8423 + return qm ? 1 : 0;
8424 +}
8425 +
8426 +__init int qman_init_early(void)
8427 +{
8428 + struct device_node *dn;
8429 + int ret;
8430 +
8431 + for_each_compatible_node(dn, NULL, "fsl,qman") {
8432 + if (qm)
8433 + pr_err("%s: only one 'fsl,qman' allowed\n",
8434 + dn->full_name);
8435 + else {
8436 + if (!of_device_is_available(dn))
8437 + continue;
8438 +
8439 + ret = fsl_qman_init(dn);
8440 + BUG_ON(ret);
8441 + }
8442 + }
8443 + return 0;
8444 +}
8445 +postcore_initcall_sync(qman_init_early);
8446 +
8447 +static void log_edata_bits(u32 bit_count)
8448 +{
8449 + u32 i, j, mask = 0xffffffff;
8450 +
8451 + pr_warn("Qman ErrInt, EDATA:\n");
8452 + i = bit_count/32;
8453 + if (bit_count%32) {
8454 + i++;
8455 + mask = ~(mask << bit_count%32);
8456 + }
8457 + j = 16-i;
8458 + pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask);
8459 + j++;
8460 + for (; j < 16; j++)
8461 + pr_warn(" 0x%08x\n", qm_in(EDATA(j)));
8462 +}
8463 +
8464 +static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
8465 +{
8466 + union qman_ecir ecir_val;
8467 + union qman_eadr eadr_val;
8468 +
8469 + ecir_val.ecir_raw = qm_in(ECIR);
8470 + /* Is portal info valid */
8471 + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
8472 + union qman_ecir2 ecir2_val;
8473 + ecir2_val.ecir2_raw = qm_in(ECIR2);
8474 + if (ecsr_val & PORTAL_ECSR_ERR) {
8475 + pr_warn("Qman ErrInt: %s id %d\n",
8476 + (ecir2_val.info.portal_type) ?
8477 + "DCP" : "SWP", ecir2_val.info.portal_num);
8478 + }
8479 + if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) {
8480 + pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
8481 + ecir_val.info.fqid);
8482 + }
8483 + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
8484 + eadr_val.eadr_raw = qm_in(EADR);
8485 + pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
8486 + error_mdata[eadr_val.info_rev3.memid].txt,
8487 + error_mdata[eadr_val.info_rev3.memid].addr_mask
8488 + & eadr_val.info_rev3.eadr);
8489 + log_edata_bits(
8490 + error_mdata[eadr_val.info_rev3.memid].bits);
8491 + }
8492 + } else {
8493 + if (ecsr_val & PORTAL_ECSR_ERR) {
8494 + pr_warn("Qman ErrInt: %s id %d\n",
8495 + (ecir_val.info.portal_type) ?
8496 + "DCP" : "SWP", ecir_val.info.portal_num);
8497 + }
8498 + if (ecsr_val & FQID_ECSR_ERR) {
8499 + pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
8500 + ecir_val.info.fqid);
8501 + }
8502 + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
8503 + eadr_val.eadr_raw = qm_in(EADR);
8504 + pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
8505 + error_mdata[eadr_val.info.memid].txt,
8506 + error_mdata[eadr_val.info.memid].addr_mask
8507 + & eadr_val.info.eadr);
8508 + log_edata_bits(error_mdata[eadr_val.info.memid].bits);
8509 + }
8510 + }
8511 +}
8512 +
8513 +/* Qman interrupt handler */
8514 +static irqreturn_t qman_isr(int irq, void *ptr)
8515 +{
8516 + u32 isr_val, ier_val, ecsr_val, isr_mask, i;
8517 +
8518 + ier_val = qm_err_isr_enable_read(qm);
8519 + isr_val = qm_err_isr_status_read(qm);
8520 + ecsr_val = qm_in(ECSR);
8521 + isr_mask = isr_val & ier_val;
8522 +
8523 + if (!isr_mask)
8524 + return IRQ_NONE;
8525 + for (i = 0; i < QMAN_HWE_COUNT; i++) {
8526 + if (qman_hwerr_txts[i].mask & isr_mask) {
8527 + pr_warn("Qman ErrInt: %s\n", qman_hwerr_txts[i].txt);
8528 + if (qman_hwerr_txts[i].mask & ecsr_val) {
8529 + log_additional_error_info(isr_mask, ecsr_val);
8530 + /* Re-arm error capture registers */
8531 + qm_out(ECSR, ecsr_val);
8532 + }
8533 + if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) {
8534 + pr_devel("Qman un-enabling error 0x%x\n",
8535 + qman_hwerr_txts[i].mask);
8536 + ier_val &= ~qman_hwerr_txts[i].mask;
8537 + qm_err_isr_enable_write(qm, ier_val);
8538 + }
8539 + }
8540 + }
8541 + qm_err_isr_status_clear(qm, isr_val);
8542 + return IRQ_HANDLED;
8543 +}
8544 +
8545 +static int __bind_irq(void)
8546 +{
8547 + int ret, err_irq;
8548 +
8549 + err_irq = of_irq_to_resource(qm_node, 0, NULL);
8550 + if (err_irq == 0) {
8551 + pr_info("Can't get %s property '%s'\n", qm_node->full_name,
8552 + "interrupts");
8553 + return -ENODEV;
8554 + }
8555 + ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node);
8556 + if (ret) {
8557 + pr_err("request_irq() failed %d for '%s'\n", ret,
8558 + qm_node->full_name);
8559 + return -ENODEV;
8560 + }
8561 +	/* Write-to-clear any stale bits (e.g. starvation asserted prior
8562 +	 * to resource allocation during driver init). */
8563 + qm_err_isr_status_clear(qm, 0xffffffff);
8564 + /* Enable Error Interrupts */
8565 + qm_err_isr_enable_write(qm, 0xffffffff);
8566 + return 0;
8567 +}
8568 +
8569 +int qman_init_ccsr(struct device_node *node)
8570 +{
8571 + int ret;
8572 + if (!qman_have_ccsr())
8573 + return 0;
8574 + if (node != qm_node)
8575 + return -EINVAL;
8576 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
8577 +	/* TEMP for LS1043: should be done in U-Boot */
8578 + qm_out(QCSP_BARE, 0x5);
8579 + qm_out(QCSP_BAR, 0x0);
8580 +#endif
8581 + /* FQD memory */
8582 + qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
8583 + /* PFDR memory */
8584 + qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
8585 + qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
8586 + /* thresholds */
8587 + qm_set_pfdr_threshold(qm, 512, 64);
8588 + qm_set_sfdr_threshold(qm, 128);
8589 + /* clear stale PEBI bit from interrupt status register */
8590 + qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
8591 + /* corenet initiator settings */
8592 + qm_set_corenet_initiator(qm);
8593 + /* HID settings */
8594 + qm_set_hid(qm);
8595 + /* Set scheduling weights to defaults */
8596 + for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
8597 + qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
8598 + /* We are not prepared to accept ERNs for hardware enqueues */
8599 + qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
8600 + qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
8601 + /* Initialise Error Interrupt Handler */
8602 + ret = __bind_irq();
8603 + if (ret)
8604 + return ret;
8605 + return 0;
8606 +}
8607 +
8608 +#define LIO_CFG_LIODN_MASK 0x0fff0000
8609 +void qman_liodn_fixup(u16 channel)
8610 +{
8611 + static int done;
8612 + static u32 liodn_offset;
8613 + u32 before, after;
8614 + int idx = channel - QM_CHANNEL_SWPORTAL0;
8615 +
8616 + if (!qman_have_ccsr())
8617 + return;
8618 + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
8619 + before = qm_in(REV3_QCSP_LIO_CFG(idx));
8620 + else
8621 + before = qm_in(QCSP_LIO_CFG(idx));
8622 + if (!done) {
8623 + liodn_offset = before & LIO_CFG_LIODN_MASK;
8624 + done = 1;
8625 + return;
8626 + }
8627 + after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
8628 + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
8629 + qm_out(REV3_QCSP_LIO_CFG(idx), after);
8630 + else
8631 + qm_out(QCSP_LIO_CFG(idx), after);
8632 +}
8633 +
8634 +#define IO_CFG_SDEST_MASK 0x00ff0000
8635 +int qman_set_sdest(u16 channel, unsigned int cpu_idx)
8636 +{
8637 + int idx = channel - QM_CHANNEL_SWPORTAL0;
8638 + u32 before, after;
8639 +
8640 + if (!qman_have_ccsr())
8641 + return -ENODEV;
8642 + if ((qman_ip_rev & 0xFF00) == QMAN_REV31) {
8643 + /* LS1043A - only one L2 cache */
8644 + cpu_idx = 0;
8645 + }
8646 +
8647 + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
8648 + before = qm_in(REV3_QCSP_IO_CFG(idx));
8649 +		/* Each pair of vCPUs shares the same SRQ (SDEST) */
8650 + cpu_idx /= 2;
8651 + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
8652 + qm_out(REV3_QCSP_IO_CFG(idx), after);
8653 + } else {
8654 + before = qm_in(QCSP_IO_CFG(idx));
8655 + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
8656 + qm_out(QCSP_IO_CFG(idx), after);
8657 + }
8658 + return 0;
8659 +}
8660 +
8661 +#define MISC_CFG_WPM_MASK 0x00000002
8662 +int qm_set_wpm(int wpm)
8663 +{
8664 + u32 before;
8665 + u32 after;
8666 +
8667 + if (!qman_have_ccsr())
8668 + return -ENODEV;
8669 +
8670 + before = qm_in(MISC_CFG);
8671 + after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
8672 + qm_out(MISC_CFG, after);
8673 + return 0;
8674 +}
8675 +
8676 +int qm_get_wpm(int *wpm)
8677 +{
8678 + u32 before;
8679 +
8680 + if (!qman_have_ccsr())
8681 + return -ENODEV;
8682 +
8683 + before = qm_in(MISC_CFG);
8684 + *wpm = (before & MISC_CFG_WPM_MASK) >> 1;
8685 + return 0;
8686 +}
8687 +
8688 +/* The CEETM_CFG_PRES register has a PRES field which is calculated as:
8689 + * PRES = (2^22 / credit update reference period) * QMan clock period
8690 + *      = ((2^22 * 10^9) / CONFIG_QMAN_CEETM_UPDATE_PERIOD) / qman_clk
8691 + */
8692 +
8693 +int qman_ceetm_set_prescaler(enum qm_dc_portal portal)
8694 +{
8695 + u64 temp;
8696 + u16 pres;
8697 +
8698 + if (!qman_have_ccsr())
8699 + return -ENODEV;
8700 +
8701 + temp = 0x400000 * 100;
8702 + do_div(temp, CONFIG_QMAN_CEETM_UPDATE_PERIOD);
8703 + temp *= 10000000;
8704 + do_div(temp, qman_clk);
8705 + pres = (u16) temp;
8706 + qm_out(CEETM_CFG_IDX, portal);
8707 + qm_out(CEETM_CFG_PRES, pres);
8708 + return 0;
8709 +}
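+/*
+ * Worked example for the PRES formula above (illustrative only, both inputs
+ * are assumed values, not taken from this driver): with a credit update
+ * reference period CONFIG_QMAN_CEETM_UPDATE_PERIOD of 1000 ns and a qman_clk
+ * of 500 MHz, the integer arithmetic in qman_ceetm_set_prescaler() gives
+ *
+ *	temp = (0x400000 * 100) / 1000 = 419430
+ *	pres = (419430 * 10000000) / 500000000 = 8388
+ *
+ * so CEETM_CFG_PRES would be programmed with 8388 for that portal.
+ */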
8710 +
8711 +int qman_ceetm_get_prescaler(u16 *pres)
8712 +{
8713 + if (!qman_have_ccsr())
8714 + return -ENODEV;
8715 + *pres = (u16)qm_in(CEETM_CFG_PRES);
8716 + return 0;
8717 +}
8718 +
8719 +#define DCP_CFG_CEETME_MASK 0xFFFF0000
8720 +#define QM_SP_ENABLE_CEETM(n) (0x80000000 >> (n))
8721 +int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
8722 +{
8723 + u32 dcp_cfg;
8724 +
8725 + if (!qman_have_ccsr())
8726 + return -ENODEV;
8727 +
8728 + dcp_cfg = qm_in(DCP_CFG(portal));
8729 + dcp_cfg |= QM_SP_ENABLE_CEETM(sub_portal);
8730 + qm_out(DCP_CFG(portal), dcp_cfg);
8731 + return 0;
8732 +}
8733 +
8734 +int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
8735 +{
8736 + u32 dcp_cfg;
8737 +
8738 + if (!qman_have_ccsr())
8739 + return -ENODEV;
8740 + dcp_cfg = qm_in(DCP_CFG(portal));
8741 + dcp_cfg &= ~(QM_SP_ENABLE_CEETM(sub_portal));
8742 + qm_out(DCP_CFG(portal), dcp_cfg);
8743 + return 0;
8744 +}
8745 +
8746 +int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num)
8747 +{
8748 + if (!qman_have_ccsr())
8749 + return -ENODEV;
8750 + *num = qm_in(CEETM_XSFDR_IN_USE);
8751 + return 0;
8752 +}
8753 +EXPORT_SYMBOL(qman_ceetm_get_xsfdr);
8754 +
8755 +#ifdef CONFIG_SYSFS
8756 +
8757 +#define DRV_NAME "fsl-qman"
8758 +#define DCP_MAX_ID 3
8759 +#define DCP_MIN_ID 0
8760 +
8761 +static ssize_t show_pfdr_fpc(struct device *dev,
8762 + struct device_attribute *dev_attr, char *buf)
8763 +{
8764 + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC));
8765 +};
8766 +
8767 +static ssize_t show_dlm_avg(struct device *dev,
8768 + struct device_attribute *dev_attr, char *buf)
8769 +{
8770 + u32 data;
8771 + int i;
8772 +
8773 + if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
8774 + return -EINVAL;
8775 + if (i < DCP_MIN_ID || i > DCP_MAX_ID)
8776 + return -EINVAL;
8777 + data = qm_in(DCP_DLM_AVG(i));
8778 + return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
8779 + (data & 0x000000ff)*390625);
8780 +};
8781 +
8782 +static ssize_t set_dlm_avg(struct device *dev,
8783 + struct device_attribute *dev_attr, const char *buf, size_t count)
8784 +{
8785 + unsigned long val;
8786 + int i;
8787 +
8788 + if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
8789 + return -EINVAL;
8790 + if (i < DCP_MIN_ID || i > DCP_MAX_ID)
8791 + return -EINVAL;
8792 + if (kstrtoul(buf, 0, &val)) {
8793 + dev_dbg(dev, "invalid input %s\n", buf);
8794 + return -EINVAL;
8795 + }
8796 + qm_out(DCP_DLM_AVG(i), val);
8797 + return count;
8798 +};
8799 +
8800 +static ssize_t show_pfdr_cfg(struct device *dev,
8801 + struct device_attribute *dev_attr, char *buf)
8802 +{
8803 + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG));
8804 +};
8805 +
8806 +static ssize_t set_pfdr_cfg(struct device *dev,
8807 + struct device_attribute *dev_attr, const char *buf, size_t count)
8808 +{
8809 + unsigned long val;
8810 +
8811 + if (kstrtoul(buf, 0, &val)) {
8812 + dev_dbg(dev, "invalid input %s\n", buf);
8813 + return -EINVAL;
8814 + }
8815 + qm_out(PFDR_CFG, val);
8816 + return count;
8817 +};
8818 +
8819 +static ssize_t show_sfdr_in_use(struct device *dev,
8820 + struct device_attribute *dev_attr, char *buf)
8821 +{
8822 + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE));
8823 +};
8824 +
8825 +static ssize_t show_idle_stat(struct device *dev,
8826 + struct device_attribute *dev_attr, char *buf)
8827 +{
8828 + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT));
8829 +};
8830 +
8831 +static ssize_t show_ci_rlm_avg(struct device *dev,
8832 + struct device_attribute *dev_attr, char *buf)
8833 +{
8834 + u32 data = qm_in(CI_RLM_AVG);
8835 + return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
8836 + (data & 0x000000ff)*390625);
8837 +};
8838 +
8839 +static ssize_t set_ci_rlm_avg(struct device *dev,
8840 + struct device_attribute *dev_attr, const char *buf, size_t count)
8841 +{
8842 + unsigned long val;
8843 +
8844 + if (kstrtoul(buf, 0, &val)) {
8845 + dev_dbg(dev, "invalid input %s\n", buf);
8846 + return -EINVAL;
8847 + }
8848 + qm_out(CI_RLM_AVG, val);
8849 + return count;
8850 +};
8851 +
8852 +static ssize_t show_err_isr(struct device *dev,
8853 + struct device_attribute *dev_attr, char *buf)
8854 +{
8855 + return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR));
8856 +};
8857 +
8858 +#define SBEC_MAX_ID 14
8859 +#define SBEC_MIN_ID 0
8860 +
8861 +static ssize_t show_sbec(struct device *dev,
8862 + struct device_attribute *dev_attr, char *buf)
8863 +{
8864 + int i;
8865 +
8866 + if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
8867 + return -EINVAL;
8868 + if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
8869 + return -EINVAL;
8870 + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i)));
8871 +};
8872 +
8873 +static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL);
8874 +static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg);
8875 +static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL);
8876 +static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR),
8877 + show_ci_rlm_avg, set_ci_rlm_avg);
8878 +static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
8879 +static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL);
8880 +
8881 +static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
8882 +static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
8883 +static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
8884 +static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
8885 +
8886 +static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
8887 +static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
8888 +static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL);
8889 +static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL);
8890 +static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL);
8891 +static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL);
8892 +static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL);
8893 +static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL);
8894 +static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL);
8895 +static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL);
8896 +static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL);
8897 +static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL);
8898 +static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL);
8899 +static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL);
8900 +static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL);
8901 +
8902 +static struct attribute *qman_dev_attributes[] = {
8903 + &dev_attr_pfdr_fpc.attr,
8904 + &dev_attr_pfdr_cfg.attr,
8905 + &dev_attr_idle_stat.attr,
8906 + &dev_attr_ci_rlm_avg.attr,
8907 + &dev_attr_err_isr.attr,
8908 + &dev_attr_dcp0_dlm_avg.attr,
8909 + &dev_attr_dcp1_dlm_avg.attr,
8910 + &dev_attr_dcp2_dlm_avg.attr,
8911 + &dev_attr_dcp3_dlm_avg.attr,
8912 + /* sfdr_in_use will be added if necessary */
8913 + NULL
8914 +};
8915 +
8916 +static struct attribute *qman_dev_ecr_attributes[] = {
8917 + &dev_attr_sbec_0.attr,
8918 + &dev_attr_sbec_1.attr,
8919 + &dev_attr_sbec_2.attr,
8920 + &dev_attr_sbec_3.attr,
8921 + &dev_attr_sbec_4.attr,
8922 + &dev_attr_sbec_5.attr,
8923 + &dev_attr_sbec_6.attr,
8924 + &dev_attr_sbec_7.attr,
8925 + &dev_attr_sbec_8.attr,
8926 + &dev_attr_sbec_9.attr,
8927 + &dev_attr_sbec_10.attr,
8928 + &dev_attr_sbec_11.attr,
8929 + &dev_attr_sbec_12.attr,
8930 + &dev_attr_sbec_13.attr,
8931 + &dev_attr_sbec_14.attr,
8932 + NULL
8933 +};
8934 +
8935 +/* root level */
8936 +static const struct attribute_group qman_dev_attr_grp = {
8937 + .name = NULL,
8938 + .attrs = qman_dev_attributes
8939 +};
8940 +static const struct attribute_group qman_dev_ecr_grp = {
8941 + .name = "error_capture",
8942 + .attrs = qman_dev_ecr_attributes
8943 +};
8944 +
8945 +static int of_fsl_qman_remove(struct platform_device *ofdev)
8946 +{
8947 + sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
8948 + return 0;
8949 +};
8950 +
8951 +static int of_fsl_qman_probe(struct platform_device *ofdev)
8952 +{
8953 + int ret;
8954 +
8955 + ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
8956 + if (ret)
8957 + goto done;
8958 + ret = sysfs_add_file_to_group(&ofdev->dev.kobj,
8959 + &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name);
8960 + if (ret)
8961 + goto del_group_0;
8962 + ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_ecr_grp);
8963 + if (ret)
8964 + goto del_group_0;
8965 +
8966 + goto done;
8967 +
8968 +del_group_0:
8969 + sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
8970 +done:
8971 + if (ret)
8972 + dev_err(&ofdev->dev,
8973 + "Cannot create dev attributes ret=%d\n", ret);
8974 + return ret;
8975 +};
8976 +
8977 +static struct of_device_id of_fsl_qman_ids[] = {
8978 + {
8979 + .compatible = "fsl,qman",
8980 + },
8981 + {}
8982 +};
8983 +MODULE_DEVICE_TABLE(of, of_fsl_qman_ids);
8984 +
8985 +#ifdef CONFIG_SUSPEND
8986 +
8987 +static u32 saved_isdr;
8988 +static int qman_pm_suspend_noirq(struct device *dev)
8989 +{
8990 + uint32_t idle_state;
8991 +
8992 + suspend_unused_qportal();
8993 + /* save isdr, disable all, clear isr */
8994 + saved_isdr = qm_err_isr_disable_read(qm);
8995 + qm_err_isr_disable_write(qm, 0xffffffff);
8996 + qm_err_isr_status_clear(qm, 0xffffffff);
8997 + idle_state = qm_in(IDLE_STAT);
8998 + if (!(idle_state & 0x1)) {
8999 + pr_err("Qman not idle 0x%x aborting\n", idle_state);
9000 + qm_err_isr_disable_write(qm, saved_isdr);
9001 + resume_unused_qportal();
9002 + return -EBUSY;
9003 + }
9004 +#ifdef CONFIG_PM_DEBUG
9005 + pr_info("Qman suspend code, IDLE_STAT = 0x%x\n", idle_state);
9006 +#endif
9007 + return 0;
9008 +}
9009 +
9010 +static int qman_pm_resume_noirq(struct device *dev)
9011 +{
9012 + /* restore isdr */
9013 + qm_err_isr_disable_write(qm, saved_isdr);
9014 + resume_unused_qportal();
9015 + return 0;
9016 +}
9017 +#else
9018 +#define qman_pm_suspend_noirq NULL
9019 +#define qman_pm_resume_noirq NULL
9020 +#endif
9021 +
9022 +static const struct dev_pm_ops qman_pm_ops = {
9023 + .suspend_noirq = qman_pm_suspend_noirq,
9024 + .resume_noirq = qman_pm_resume_noirq,
9025 +};
9026 +
9027 +static struct platform_driver of_fsl_qman_driver = {
9028 + .driver = {
9029 + .owner = THIS_MODULE,
9030 + .name = DRV_NAME,
9031 + .of_match_table = of_fsl_qman_ids,
9032 + .pm = &qman_pm_ops,
9033 + },
9034 + .probe = of_fsl_qman_probe,
9035 + .remove = of_fsl_qman_remove,
9036 +};
9037 +
9038 +static int qman_ctrl_init(void)
9039 +{
9040 + return platform_driver_register(&of_fsl_qman_driver);
9041 +}
9042 +
9043 +static void qman_ctrl_exit(void)
9044 +{
9045 + platform_driver_unregister(&of_fsl_qman_driver);
9046 +}
9047 +
9048 +module_init(qman_ctrl_init);
9049 +module_exit(qman_ctrl_exit);
9050 +
9051 +#endif /* CONFIG_SYSFS */
9052 --- /dev/null
9053 +++ b/drivers/staging/fsl_qbman/qman_debugfs.c
9054 @@ -0,0 +1,1594 @@
9055 +/* Copyright 2010-2011 Freescale Semiconductor, Inc.
9056 + *
9057 + * Redistribution and use in source and binary forms, with or without
9058 + * modification, are permitted provided that the following conditions are met:
9059 + * * Redistributions of source code must retain the above copyright
9060 + * notice, this list of conditions and the following disclaimer.
9061 + * * Redistributions in binary form must reproduce the above copyright
9062 + * notice, this list of conditions and the following disclaimer in the
9063 + * documentation and/or other materials provided with the distribution.
9064 + * * Neither the name of Freescale Semiconductor nor the
9065 + * names of its contributors may be used to endorse or promote products
9066 + * derived from this software without specific prior written permission.
9067 + *
9068 + *
9069 + * ALTERNATIVELY, this software may be distributed under the terms of the
9070 + * GNU General Public License ("GPL") as published by the Free Software
9071 + * Foundation, either version 2 of that License or (at your option) any
9072 + * later version.
9073 + *
9074 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9075 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9076 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9077 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9078 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9079 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9080 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9081 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9082 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9083 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9084 + */
9085 +#include "qman_private.h"
9086 +
9087 +#define MAX_FQID (0x00ffffff)
9088 +#define QM_FQD_BLOCK_SIZE 64
9089 +#define QM_FQD_AR (0xC10)
9090 +
9091 +static u32 fqid_max;
9092 +static u64 qman_ccsr_start;
9093 +static u64 qman_ccsr_size;
9094 +
9095 +static const char * const state_txt[] = {
9096 + "Out of Service",
9097 + "Retired",
9098 + "Tentatively Scheduled",
9099 + "Truly Scheduled",
9100 + "Parked",
9101 + "Active, Active Held or Held Suspended",
9102 + "Unknown State 6",
9103 + "Unknown State 7",
9104 + NULL,
9105 +};
9106 +
9107 +static const u8 fqd_states[] = {
9108 + QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED,
9109 + QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED,
9110 + QM_MCR_NP_STATE_ACTIVE};
9111 +
9112 +struct mask_to_text {
9113 + u16 mask;
9114 + const char *txt;
9115 +};
9116 +
9117 +struct mask_filter_s {
9118 + u16 mask;
9119 + u8 filter;
9120 +};
9121 +
9122 +static const struct mask_filter_s mask_filter[] = {
9123 + {QM_FQCTRL_PREFERINCACHE, 0},
9124 + {QM_FQCTRL_PREFERINCACHE, 1},
9125 + {QM_FQCTRL_HOLDACTIVE, 0},
9126 + {QM_FQCTRL_HOLDACTIVE, 1},
9127 + {QM_FQCTRL_AVOIDBLOCK, 0},
9128 + {QM_FQCTRL_AVOIDBLOCK, 1},
9129 + {QM_FQCTRL_FORCESFDR, 0},
9130 + {QM_FQCTRL_FORCESFDR, 1},
9131 + {QM_FQCTRL_CPCSTASH, 0},
9132 + {QM_FQCTRL_CPCSTASH, 1},
9133 + {QM_FQCTRL_CTXASTASHING, 0},
9134 + {QM_FQCTRL_CTXASTASHING, 1},
9135 + {QM_FQCTRL_ORP, 0},
9136 + {QM_FQCTRL_ORP, 1},
9137 + {QM_FQCTRL_TDE, 0},
9138 + {QM_FQCTRL_TDE, 1},
9139 + {QM_FQCTRL_CGE, 0},
9140 + {QM_FQCTRL_CGE, 1}
9141 +};
9142 +
9143 +static const struct mask_to_text fq_ctrl_text_list[] = {
9144 + {
9145 + .mask = QM_FQCTRL_PREFERINCACHE,
9146 + .txt = "Prefer in cache",
9147 + },
9148 + {
9149 + .mask = QM_FQCTRL_HOLDACTIVE,
9150 + .txt = "Hold active in portal",
9151 + },
9152 + {
9153 + .mask = QM_FQCTRL_AVOIDBLOCK,
9154 + .txt = "Avoid Blocking",
9155 + },
9156 + {
9157 + .mask = QM_FQCTRL_FORCESFDR,
9158 + .txt = "High-priority SFDRs",
9159 + },
9160 + {
9161 + .mask = QM_FQCTRL_CPCSTASH,
9162 + .txt = "CPC Stash Enable",
9163 + },
9164 + {
9165 + .mask = QM_FQCTRL_CTXASTASHING,
9166 + .txt = "Context-A stashing",
9167 + },
9168 + {
9169 + .mask = QM_FQCTRL_ORP,
9170 + .txt = "ORP Enable",
9171 + },
9172 + {
9173 + .mask = QM_FQCTRL_TDE,
9174 + .txt = "Tail-Drop Enable",
9175 + },
9176 + {
9177 + .mask = QM_FQCTRL_CGE,
9178 + .txt = "Congestion Group Enable",
9179 + },
9180 + {
9181 + .mask = 0,
9182 + .txt = NULL,
9183 + }
9184 +};
9185 +
9186 +static const char *get_fqd_ctrl_text(u16 mask)
9187 +{
9188 + int i = 0;
9189 +
9190 + while (fq_ctrl_text_list[i].txt != NULL) {
9191 + if (fq_ctrl_text_list[i].mask == mask)
9192 + return fq_ctrl_text_list[i].txt;
9193 + i++;
9194 + }
9195 + return NULL;
9196 +}
9197 +
9198 +static const struct mask_to_text stashing_text_list[] = {
9199 + {
9200 + .mask = QM_STASHING_EXCL_CTX,
9201 + .txt = "FQ Ctx Stash"
9202 + },
9203 + {
9204 + .mask = QM_STASHING_EXCL_DATA,
9205 + .txt = "Frame Data Stash",
9206 + },
9207 + {
9208 + .mask = QM_STASHING_EXCL_ANNOTATION,
9209 + .txt = "Frame Annotation Stash",
9210 + },
9211 + {
9212 + .mask = 0,
9213 + .txt = NULL,
9214 + },
9215 +};
9216 +
9217 +static int user_input_convert(const char __user *user_buf, size_t count,
9218 + unsigned long *val)
9219 +{
9220 + char buf[12];
9221 +
9222 + if (count > sizeof(buf) - 1)
9223 + return -EINVAL;
9224 + if (copy_from_user(buf, user_buf, count))
9225 + return -EFAULT;
9226 + buf[count] = '\0';
9227 + if (kstrtoul(buf, 0, val))
9228 + return -EINVAL;
9229 + return 0;
9230 +}
9231 +
9232 +struct line_buffer_fq {
9233 + u32 buf[8];
9234 + u32 buf_cnt;
9235 + int line_cnt;
9236 +};
9237 +
9238 +static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid,
9239 + struct seq_file *file)
9240 +{
9241 + line_buf->buf[line_buf->buf_cnt] = fqid;
9242 + line_buf->buf_cnt++;
9243 + if (line_buf->buf_cnt == 8) {
9244 + /* Buffer is full, flush it */
9245 + if (line_buf->line_cnt != 0)
9246 + seq_puts(file, ",\n");
9247 + seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x,"
9248 + "0x%06x,0x%06x,0x%06x",
9249 + line_buf->buf[0], line_buf->buf[1], line_buf->buf[2],
9250 + line_buf->buf[3], line_buf->buf[4], line_buf->buf[5],
9251 + line_buf->buf[6], line_buf->buf[7]);
9252 + line_buf->buf_cnt = 0;
9253 + line_buf->line_cnt++;
9254 + }
9255 +}
9256 +
9257 +static void flush_line_buffer(struct line_buffer_fq *line_buf,
9258 + struct seq_file *file)
9259 +{
9260 + if (line_buf->buf_cnt) {
9261 + int y = 0;
9262 + if (line_buf->line_cnt != 0)
9263 + seq_puts(file, ",\n");
9264 + while (y != line_buf->buf_cnt) {
9265 + if (y+1 == line_buf->buf_cnt)
9266 + seq_printf(file, "0x%06x", line_buf->buf[y]);
9267 + else
9268 + seq_printf(file, "0x%06x,", line_buf->buf[y]);
9269 + y++;
9270 + }
9271 + line_buf->line_cnt++;
9272 + }
9273 + if (line_buf->line_cnt)
9274 + seq_putc(file, '\n');
9275 +}
9276 +
9277 +static struct dentry *dfs_root; /* debugfs root directory */
9278 +
9279 +/*******************************************************************************
9280 + * Query Frame Queue Non Programmable Fields
9281 + ******************************************************************************/
9282 +struct query_fq_np_fields_data_s {
9283 + u32 fqid;
9284 +};
9285 +static struct query_fq_np_fields_data_s query_fq_np_fields_data = {
9286 + .fqid = 1,
9287 +};
9288 +
9289 +static int query_fq_np_fields_show(struct seq_file *file, void *offset)
9290 +{
9291 + int ret;
9292 + struct qm_mcr_queryfq_np np;
9293 + struct qman_fq fq;
9294 +
9295 + fq.fqid = query_fq_np_fields_data.fqid;
9296 + ret = qman_query_fq_np(&fq, &np);
9297 + if (ret)
9298 + return ret;
9299 + /* Print state */
9300 + seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n",
9301 + fq.fqid);
9302 + seq_printf(file, " force eligible pending: %s\n",
9303 + (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no");
9304 + seq_printf(file, " retirement pending: %s\n",
9305 + (np.state & QM_MCR_NP_STATE_R) ? "yes" : "no");
9306 + seq_printf(file, " state: %s\n",
9307 + state_txt[np.state & QM_MCR_NP_STATE_MASK]);
9308 + seq_printf(file, " fq_link: 0x%x\n", np.fqd_link);
9309 + seq_printf(file, " odp_seq: %u\n", np.odp_seq);
9310 + seq_printf(file, " orp_nesn: %u\n", np.orp_nesn);
9311 + seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq);
9312 + seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq);
9313 + seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr);
9314 + seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr);
9315 + seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr);
9316 + seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr);
9317 + seq_printf(file, " is: ics_surp contains a %s\n",
9318 + (np.is) ? "deficit" : "surplus");
9319 + seq_printf(file, " ics_surp: %u\n", np.ics_surp);
9320 + seq_printf(file, " byte_cnt: %u\n", np.byte_cnt);
9321 + seq_printf(file, " frm_cnt: %u\n", np.frm_cnt);
9322 + seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr);
9323 + seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr);
9324 + seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr);
9325 + seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr);
9326 + seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr);
9327 + return 0;
9328 +}
9329 +
9330 +static int query_fq_np_fields_open(struct inode *inode,
9331 + struct file *file)
9332 +{
9333 + return single_open(file, query_fq_np_fields_show, NULL);
9334 +}
9335 +
9336 +static ssize_t query_fq_np_fields_write(struct file *f,
9337 + const char __user *buf, size_t count, loff_t *off)
9338 +{
9339 + int ret;
9340 + unsigned long val;
9341 +
9342 + ret = user_input_convert(buf, count, &val);
9343 + if (ret)
9344 + return ret;
9345 + if (val > MAX_FQID)
9346 + return -EINVAL;
9347 + query_fq_np_fields_data.fqid = (u32)val;
9348 + return count;
9349 +}
9350 +
9351 +static const struct file_operations query_fq_np_fields_fops = {
9352 + .owner = THIS_MODULE,
9353 + .open = query_fq_np_fields_open,
9354 + .read = seq_read,
9355 + .write = query_fq_np_fields_write,
9356 + .release = single_release,
9357 +};
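+/*
+ * Usage sketch (assumption, not part of the driver): if this fops is exposed
+ * as a "query_fq_np_fields" entry under the QMan debugfs root created later
+ * in this file, a query from userspace would look roughly like
+ *
+ *	echo 0x42 > <debugfs>/query_fq_np_fields	(select the FQID)
+ *	cat <debugfs>/query_fq_np_fields		(run qman_query_fq_np())
+ *
+ * The entry name and mount point are illustrative placeholders only.
+ */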
9358 +
9359 +/*******************************************************************************
9360 + * Frame Queue Programmable Fields
9361 + ******************************************************************************/
9362 +struct query_fq_fields_data_s {
9363 + u32 fqid;
9364 +};
9365 +
9366 +static struct query_fq_fields_data_s query_fq_fields_data = {
9367 + .fqid = 1,
9368 +};
9369 +
9370 +static int query_fq_fields_show(struct seq_file *file, void *offset)
9371 +{
9372 + int ret;
9373 + struct qm_fqd fqd;
9374 + struct qman_fq fq;
9375 + int i = 0;
9376 +
9377 + memset(&fqd, 0, sizeof(struct qm_fqd));
9378 + fq.fqid = query_fq_fields_data.fqid;
9379 + ret = qman_query_fq(&fq, &fqd);
9380 + if (ret)
9381 + return ret;
9382 + seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n",
9383 + fq.fqid);
9384 + seq_printf(file, " orprws: %u\n", fqd.orprws);
9385 + seq_printf(file, " oa: %u\n", fqd.oa);
9386 + seq_printf(file, " olws: %u\n", fqd.olws);
9387 +
9388 + seq_printf(file, " cgid: %u\n", fqd.cgid);
9389 +
9390 + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0)
9391 + seq_puts(file, " fq_ctrl: None\n");
9392 + else {
9393 + i = 0;
9394 + seq_puts(file, " fq_ctrl:\n");
9395 + while (fq_ctrl_text_list[i].txt != NULL) {
9396 + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
9397 + fq_ctrl_text_list[i].mask)
9398 + seq_printf(file, " %s\n",
9399 + fq_ctrl_text_list[i].txt);
9400 + i++;
9401 + }
9402 + }
9403 + seq_printf(file, " dest_channel: %u\n", fqd.dest.channel);
9404 + seq_printf(file, " dest_wq: %u\n", fqd.dest.wq);
9405 + seq_printf(file, " ics_cred: %u\n", fqd.ics_cred);
9406 + seq_printf(file, " td_mant: %u\n", fqd.td.mant);
9407 + seq_printf(file, " td_exp: %u\n", fqd.td.exp);
9408 +
9409 + seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b);
9410 +
9411 + seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd));
9412 + /* Any stashing configured */
9413 + if ((fqd.context_a.stashing.exclusive & 0x7) == 0)
9414 + seq_puts(file, " ctx_a_stash_exclusive: None\n");
9415 + else {
9416 + seq_puts(file, " ctx_a_stash_exclusive:\n");
9417 + i = 0;
9418 + while (stashing_text_list[i].txt != NULL) {
9419 +			if (fqd.context_a.stashing.exclusive & stashing_text_list[i].mask)
9420 + seq_printf(file, " %s\n",
9421 + stashing_text_list[i].txt);
9422 + i++;
9423 + }
9424 + }
9425 + seq_printf(file, " ctx_a_stash_annotation_cl: %u\n",
9426 + fqd.context_a.stashing.annotation_cl);
9427 + seq_printf(file, " ctx_a_stash_data_cl: %u\n",
9428 + fqd.context_a.stashing.data_cl);
9429 + seq_printf(file, " ctx_a_stash_context_cl: %u\n",
9430 + fqd.context_a.stashing.context_cl);
9431 + return 0;
9432 +}
9433 +
9434 +static int query_fq_fields_open(struct inode *inode,
9435 + struct file *file)
9436 +{
9437 + return single_open(file, query_fq_fields_show, NULL);
9438 +}
9439 +
9440 +static ssize_t query_fq_fields_write(struct file *f,
9441 + const char __user *buf, size_t count, loff_t *off)
9442 +{
9443 + int ret;
9444 + unsigned long val;
9445 +
9446 + ret = user_input_convert(buf, count, &val);
9447 + if (ret)
9448 + return ret;
9449 + if (val > MAX_FQID)
9450 + return -EINVAL;
9451 + query_fq_fields_data.fqid = (u32)val;
9452 + return count;
9453 +}
9454 +
9455 +static const struct file_operations query_fq_fields_fops = {
9456 + .owner = THIS_MODULE,
9457 + .open = query_fq_fields_open,
9458 + .read = seq_read,
9459 + .write = query_fq_fields_write,
9460 + .release = single_release,
9461 +};
9462 +
9463 +/*******************************************************************************
9464 + * Query WQ lengths
9465 + ******************************************************************************/
9466 +struct query_wq_lengths_data_s {
9467 + union {
9468 + u16 channel_wq; /* ignores wq (3 lsbits) */
9469 + struct {
9470 + u16 id:13; /* qm_channel */
9471 + u16 __reserved:3;
9472 + } __packed channel;
9473 + };
9474 +};
9475 +static struct query_wq_lengths_data_s query_wq_lengths_data;
9476 +static int query_wq_lengths_show(struct seq_file *file, void *offset)
9477 +{
9478 + int ret;
9479 + struct qm_mcr_querywq wq;
9480 + int i;
9481 +
9482 + memset(&wq, 0, sizeof(struct qm_mcr_querywq));
9483 + wq.channel.id = query_wq_lengths_data.channel.id;
9484 + ret = qman_query_wq(0, &wq);
9485 + if (ret)
9486 + return ret;
9487 + seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id);
9488 + for (i = 0; i < 8; i++)
9489 + /* mask out upper 4 bits since they are not part of length */
9490 + seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff);
9491 + return 0;
9492 +}
9493 +
9494 +static int query_wq_lengths_open(struct inode *inode,
9495 + struct file *file)
9496 +{
9497 + return single_open(file, query_wq_lengths_show, NULL);
9498 +}
9499 +
9500 +static ssize_t query_wq_lengths_write(struct file *f,
9501 + const char __user *buf, size_t count, loff_t *off)
9502 +{
9503 + int ret;
9504 + unsigned long val;
9505 +
9506 + ret = user_input_convert(buf, count, &val);
9507 + if (ret)
9508 + return ret;
9509 + if (val > 0xfff8)
9510 + return -EINVAL;
9511 + query_wq_lengths_data.channel.id = (u16)val;
9512 + return count;
9513 +}
9514 +
9515 +static const struct file_operations query_wq_lengths_fops = {
9516 + .owner = THIS_MODULE,
9517 + .open = query_wq_lengths_open,
9518 + .read = seq_read,
9519 + .write = query_wq_lengths_write,
9520 + .release = single_release,
9521 +};
9522 +
9523 +/*******************************************************************************
9524 + * Query CGR
9525 + ******************************************************************************/
9526 +struct query_cgr_s {
9527 + u8 cgid;
9528 +};
9529 +static struct query_cgr_s query_cgr_data;
9530 +
9531 +static int query_cgr_show(struct seq_file *file, void *offset)
9532 +{
9533 + int ret;
9534 + struct qm_mcr_querycgr cgrd;
9535 + struct qman_cgr cgr;
9536 + int i, j;
9537 + u32 mask;
9538 +
9539 + memset(&cgr, 0, sizeof(cgr));
9540 + memset(&cgrd, 0, sizeof(cgrd));
9541 + cgr.cgrid = query_cgr_data.cgid;
9542 + ret = qman_query_cgr(&cgr, &cgrd);
9543 + if (ret)
9544 + return ret;
9545 + seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid);
9546 + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
9547 + cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn,
9548 + cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn,
9549 + cgrd.cgr.wr_parm_g.Pn);
9550 +
9551 + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
9552 + cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn,
9553 + cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn,
9554 + cgrd.cgr.wr_parm_y.Pn);
9555 +
9556 + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
9557 + cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn,
9558 + cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn,
9559 + cgrd.cgr.wr_parm_r.Pn);
9560 +
9561 + seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
9562 + cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r);
9563 +
9564 + seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en);
9565 + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
9566 + seq_puts(file, " cscn_targ_dcp:\n");
9567 + mask = 0x80000000;
9568 + for (i = 0; i < 32; i++) {
9569 + if (cgrd.cgr.cscn_targ & mask)
9570 + seq_printf(file, " send CSCN to dcp %u\n",
9571 + (31 - i));
9572 + mask >>= 1;
9573 + }
9574 +
9575 + seq_puts(file, " cscn_targ_swp:\n");
9576 + for (i = 0; i < 4; i++) {
9577 + mask = 0x80000000;
9578 + for (j = 0; j < 32; j++) {
9579 + if (cgrd.cscn_targ_swp[i] & mask)
9580 + seq_printf(file, " send CSCN to swp"
9581 + " %u\n", (127 - (i * 32) - j));
9582 + mask >>= 1;
9583 + }
9584 + }
9585 + } else {
9586 + seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ);
9587 + }
9588 + seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en);
9589 + seq_printf(file, " cs: %u\n", cgrd.cgr.cs);
9590 +
9591 + seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
9592 + cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn);
9593 +
9594 + seq_printf(file, " mode: %s\n",
9595 + (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ?
9596 + "frame count" : "byte count");
9597 + seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd));
9598 + seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd));
9599 +
9600 + return 0;
9601 +}
9602 +
9603 +static int query_cgr_open(struct inode *inode, struct file *file)
9604 +{
9605 + return single_open(file, query_cgr_show, NULL);
9606 +}
9607 +
9608 +static ssize_t query_cgr_write(struct file *f, const char __user *buf,
9609 + size_t count, loff_t *off)
9610 +{
9611 + int ret;
9612 + unsigned long val;
9613 +
9614 + ret = user_input_convert(buf, count, &val);
9615 + if (ret)
9616 + return ret;
9617 + if (val > 0xff)
9618 + return -EINVAL;
9619 + query_cgr_data.cgid = (u8)val;
9620 + return count;
9621 +}
9622 +
9623 +static const struct file_operations query_cgr_fops = {
9624 + .owner = THIS_MODULE,
9625 + .open = query_cgr_open,
9626 + .read = seq_read,
9627 + .write = query_cgr_write,
9628 + .release = single_release,
9629 +};
9630 +
9631 +/*******************************************************************************
9632 + * Test Write CGR
9633 + ******************************************************************************/
9634 +struct test_write_cgr_s {
9635 + u64 i_bcnt;
9636 + u8 cgid;
9637 +};
9638 +static struct test_write_cgr_s test_write_cgr_data;
9639 +
9640 +static int testwrite_cgr_show(struct seq_file *file, void *offset)
9641 +{
9642 + int ret;
9643 + struct qm_mcr_cgrtestwrite result;
9644 + struct qman_cgr cgr;
9645 + u64 i_bcnt;
9646 +
9647 + memset(&cgr, 0, sizeof(struct qman_cgr));
9648 + memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite));
9649 + cgr.cgrid = test_write_cgr_data.cgid;
9650 + i_bcnt = test_write_cgr_data.i_bcnt;
9651 + ret = qman_testwrite_cgr(&cgr, i_bcnt, &result);
9652 + if (ret)
9653 + return ret;
9654 + seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid);
9655 + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
9656 + result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn,
9657 + result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn,
9658 + result.cgr.wr_parm_g.Pn);
9659 + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
9660 + result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn,
9661 + result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn,
9662 + result.cgr.wr_parm_y.Pn);
9663 + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
9664 + result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn,
9665 + result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn,
9666 + result.cgr.wr_parm_r.Pn);
9667 + seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
9668 + result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r);
9669 + seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en);
9670 + seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ);
9671 + seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en);
9672 + seq_printf(file, " cs: %u\n", result.cgr.cs);
9673 + seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
9674 + result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn);
9675 +
9676 + /* Add Mode for Si 2 */
9677 + seq_printf(file, " mode: %s\n",
9678 + (result.cgr.mode & QMAN_CGR_MODE_FRAME) ?
9679 + "frame count" : "byte count");
9680 +
9681 + seq_printf(file, " i_bcnt: %llu\n",
9682 + qm_mcr_cgrtestwrite_i_get64(&result));
9683 + seq_printf(file, " a_bcnt: %llu\n",
9684 + qm_mcr_cgrtestwrite_a_get64(&result));
9685 + seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g);
9686 + seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y);
9687 + seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r);
9688 + return 0;
9689 +}
9690 +
9691 +static int testwrite_cgr_open(struct inode *inode, struct file *file)
9692 +{
9693 + return single_open(file, testwrite_cgr_show, NULL);
9694 +}
9695 +
9696 +static const struct file_operations testwrite_cgr_fops = {
9697 + .owner = THIS_MODULE,
9698 + .open = testwrite_cgr_open,
9699 + .read = seq_read,
9700 + .release = single_release,
9701 +};
9702 +
9703 +
9704 +static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset)
9705 +{
9706 + seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt);
9707 + return 0;
9708 +}
9709 +static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file)
9710 +{
9711 + return single_open(file, testwrite_cgr_ibcnt_show, NULL);
9712 +}
9713 +
9714 +static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf,
9715 + size_t count, loff_t *off)
9716 +{
9717 + int ret;
9718 + unsigned long val;
9719 +
9720 + ret = user_input_convert(buf, count, &val);
9721 + if (ret)
9722 + return ret;
9723 + test_write_cgr_data.i_bcnt = val;
9724 + return count;
9725 +}
9726 +
9727 +static const struct file_operations teswrite_cgr_ibcnt_fops = {
9728 + .owner = THIS_MODULE,
9729 + .open = testwrite_cgr_ibcnt_open,
9730 + .read = seq_read,
9731 + .write = testwrite_cgr_ibcnt_write,
9732 + .release = single_release,
9733 +};
9734 +
9735 +static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset)
9736 +{
9737 + seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid);
9738 + return 0;
9739 +}
9740 +static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file)
9741 +{
9742 + return single_open(file, testwrite_cgr_cgrid_show, NULL);
9743 +}
9744 +
9745 +static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf,
9746 + size_t count, loff_t *off)
9747 +{
9748 + int ret;
9749 + unsigned long val;
9750 +
9751 + ret = user_input_convert(buf, count, &val);
9752 + if (ret)
9753 + return ret;
9754 + if (val > 0xff)
9755 + return -EINVAL;
9756 + test_write_cgr_data.cgid = (u8)val;
9757 + return count;
9758 +}
9759 +
9760 +static const struct file_operations teswrite_cgr_cgrid_fops = {
9761 + .owner = THIS_MODULE,
9762 + .open = testwrite_cgr_cgrid_open,
9763 + .read = seq_read,
9764 + .write = testwrite_cgr_cgrid_write,
9765 + .release = single_release,
9766 +};
9767 +
9768 +/*******************************************************************************
9769 + * Query Congestion State
9770 + ******************************************************************************/
9771 +static int query_congestion_show(struct seq_file *file, void *offset)
9772 +{
9773 + int ret;
9774 + struct qm_mcr_querycongestion cs;
9775 + int i, j, in_cong = 0;
9776 + u32 mask;
9777 +
9778 + memset(&cs, 0, sizeof(struct qm_mcr_querycongestion));
9779 + ret = qman_query_congestion(&cs);
9780 + if (ret)
9781 + return ret;
9782 + seq_puts(file, "Query Congestion Result\n");
9783 + for (i = 0; i < 8; i++) {
9784 + mask = 0x80000000;
9785 + for (j = 0; j < 32; j++) {
9786 + if (cs.state.__state[i] & mask) {
9787 + in_cong = 1;
9788 + seq_printf(file, " cg %u: %s\n", (i*32)+j,
9789 + "in congestion");
9790 + }
9791 + mask >>= 1;
9792 + }
9793 + }
9794 + if (!in_cong)
9795 + seq_puts(file, " All congestion groups not congested.\n");
9796 + return 0;
9797 +}
9798 +
9799 +static int query_congestion_open(struct inode *inode, struct file *file)
9800 +{
9801 + return single_open(file, query_congestion_show, NULL);
9802 +}
9803 +
9804 +static const struct file_operations query_congestion_fops = {
9805 + .owner = THIS_MODULE,
9806 + .open = query_congestion_open,
9807 + .read = seq_read,
9808 + .release = single_release,
9809 +};
9810 +
9811 +/*******************************************************************************
9812 + * Query CCGR
9813 + ******************************************************************************/
9814 +struct query_ccgr_s {
9815 + u32 ccgid;
9816 +};
9817 +static struct query_ccgr_s query_ccgr_data;
9818 +
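+/*
+ * The value written to "query_ceetm_ccgr" packs the target in one word:
+ * bits 31:24 select the DCP portal and bits 8:0 the CCGR id, matching the
+ * masks applied in query_ccgr_show() below.
+ */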
9819 +static int query_ccgr_show(struct seq_file *file, void *offset)
9820 +{
9821 + int ret;
9822 + struct qm_mcr_ceetm_ccgr_query ccgr_query;
9823 + struct qm_mcc_ceetm_ccgr_query query_opts;
9824 + int i, j;
9825 + u32 mask;
9826 +
9827 + memset(&ccgr_query, 0, sizeof(struct qm_mcr_ceetm_ccgr_query));
9828 + memset(&query_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_query));
9829 +
9830 + if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
9831 + return -EINVAL;
9832 +
9833 + seq_printf(file, "Query CCGID %x\n", query_ccgr_data.ccgid);
9834 + query_opts.dcpid = ((query_ccgr_data.ccgid & 0xFF000000) >> 24);
9835 + query_opts.ccgrid = query_ccgr_data.ccgid & 0x000001FF;
9836 + ret = qman_ceetm_query_ccgr(&query_opts, &ccgr_query);
9837 + if (ret)
9838 + return ret;
9839 + seq_printf(file, "Query CCGR id %x in DCP %d\n", query_opts.ccgrid,
9840 + query_opts.dcpid);
9841 + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
9842 + ccgr_query.cm_query.wr_parm_g.MA,
9843 + ccgr_query.cm_query.wr_parm_g.Mn,
9844 + ccgr_query.cm_query.wr_parm_g.SA,
9845 + ccgr_query.cm_query.wr_parm_g.Sn,
9846 + ccgr_query.cm_query.wr_parm_g.Pn);
9847 +
9848 + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
9849 + ccgr_query.cm_query.wr_parm_y.MA,
9850 + ccgr_query.cm_query.wr_parm_y.Mn,
9851 + ccgr_query.cm_query.wr_parm_y.SA,
9852 + ccgr_query.cm_query.wr_parm_y.Sn,
9853 + ccgr_query.cm_query.wr_parm_y.Pn);
9854 +
9855 + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
9856 + ccgr_query.cm_query.wr_parm_r.MA,
9857 + ccgr_query.cm_query.wr_parm_r.Mn,
9858 + ccgr_query.cm_query.wr_parm_r.SA,
9859 + ccgr_query.cm_query.wr_parm_r.Sn,
9860 + ccgr_query.cm_query.wr_parm_r.Pn);
9861 +
9862 +	seq_printf(file, " wr_en_g: %u, wr_en_y: %u, wr_en_r: %u\n",
9863 + ccgr_query.cm_query.ctl_wr_en_g,
9864 + ccgr_query.cm_query.ctl_wr_en_y,
9865 + ccgr_query.cm_query.ctl_wr_en_r);
9866 +
9867 + seq_printf(file, " cscn_en: %u\n", ccgr_query.cm_query.ctl_cscn_en);
9868 + seq_puts(file, " cscn_targ_dcp:\n");
9869 + mask = 0x80000000;
9870 + for (i = 0; i < 32; i++) {
9871 + if (ccgr_query.cm_query.cscn_targ_dcp & mask)
9872 + seq_printf(file, " send CSCN to dcp %u\n", (31 - i));
9873 + mask >>= 1;
9874 + }
9875 +
9876 + seq_puts(file, " cscn_targ_swp:\n");
9877 + for (i = 0; i < 4; i++) {
9878 + mask = 0x80000000;
9879 + for (j = 0; j < 32; j++) {
9880 + if (ccgr_query.cm_query.cscn_targ_swp[i] & mask)
9881 + seq_printf(file, " send CSCN to swp"
9882 + "%u\n", (127 - (i * 32) - j));
9883 + mask >>= 1;
9884 + }
9885 + }
9886 +
9887 + seq_printf(file, " td_en: %u\n", ccgr_query.cm_query.ctl_td_en);
9888 +
9889 + seq_printf(file, " cs_thresh_in_TA: %u, cs_thresh_in_Tn: %u\n",
9890 + ccgr_query.cm_query.cs_thres.TA,
9891 + ccgr_query.cm_query.cs_thres.Tn);
9892 +
9893 + seq_printf(file, " cs_thresh_out_TA: %u, cs_thresh_out_Tn: %u\n",
9894 + ccgr_query.cm_query.cs_thres_x.TA,
9895 + ccgr_query.cm_query.cs_thres_x.Tn);
9896 +
9897 + seq_printf(file, " td_thresh_TA: %u, td_thresh_Tn: %u\n",
9898 + ccgr_query.cm_query.td_thres.TA,
9899 + ccgr_query.cm_query.td_thres.Tn);
9900 +
9901 + seq_printf(file, " mode: %s\n",
9902 + (ccgr_query.cm_query.ctl_mode &
9903 + QMAN_CGR_MODE_FRAME) ?
9904 + "frame count" : "byte count");
9905 + seq_printf(file, " i_cnt: %llu\n", (u64)ccgr_query.cm_query.i_cnt);
9906 + seq_printf(file, " a_cnt: %llu\n", (u64)ccgr_query.cm_query.a_cnt);
9907 +
9908 + return 0;
9909 +}
9910 +
9911 +static int query_ccgr_open(struct inode *inode, struct file *file)
9912 +{
9913 + return single_open(file, query_ccgr_show, NULL);
9914 +}
9915 +
9916 +static ssize_t query_ccgr_write(struct file *f, const char __user *buf,
9917 + size_t count, loff_t *off)
9918 +{
9919 + int ret;
9920 + unsigned long val;
9921 +
9922 + ret = user_input_convert(buf, count, &val);
9923 + if (ret)
9924 + return ret;
9925 + query_ccgr_data.ccgid = val;
9926 + return count;
9927 +}
9928 +
9929 +static const struct file_operations query_ccgr_fops = {
9930 + .owner = THIS_MODULE,
9931 + .open = query_ccgr_open,
9932 + .read = seq_read,
9933 + .write = query_ccgr_write,
9934 + .release = single_release,
9935 +};
9936 +/*******************************************************************************
9937 + * QMan register
9938 + ******************************************************************************/
9939 +struct qman_register_s {
9940 + u32 val;
9941 +};
9942 +static struct qman_register_s qman_register_data;
9943 +
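+/*
+ * Locate the "fsl,qman" CCSR node in the device tree and record its physical
+ * base address and size so qman_ccsrmempeek() can map individual registers.
+ */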
9944 +static void init_ccsrmempeek(void)
9945 +{
9946 + struct device_node *dn;
9947 + const u32 *regaddr_p;
9948 +
9949 + dn = of_find_compatible_node(NULL, NULL, "fsl,qman");
9950 + if (!dn) {
9951 + pr_info("No fsl,qman node\n");
9952 + return;
9953 + }
9954 + regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL);
9955 + if (!regaddr_p) {
9956 + of_node_put(dn);
9957 + return;
9958 + }
9959 + qman_ccsr_start = of_translate_address(dn, regaddr_p);
9960 + of_node_put(dn);
9961 +}
9962 +/* This function provides access to QMan ccsr memory map */
9963 +static int qman_ccsrmempeek(u32 *val, u32 offset)
9964 +{
9965 + void __iomem *addr;
9966 + u64 phys_addr;
9967 +
9968 + if (!qman_ccsr_start)
9969 + return -EINVAL;
9970 +
9971 + if (offset > (qman_ccsr_size - sizeof(u32)))
9972 + return -EINVAL;
9973 +
9974 + phys_addr = qman_ccsr_start + offset;
9975 + addr = ioremap(phys_addr, sizeof(u32));
9976 + if (!addr) {
9977 + pr_err("ccsrmempeek, ioremap failed\n");
9978 + return -EINVAL;
9979 + }
9980 + *val = in_be32(addr);
9981 + iounmap(addr);
9982 + return 0;
9983 +}
9984 +
9985 +static int qman_ccsrmempeek_show(struct seq_file *file, void *offset)
9986 +{
9987 + u32 b;
9988 +
9989 + qman_ccsrmempeek(&b, qman_register_data.val);
9990 + seq_printf(file, "QMan register offset = 0x%x\n",
9991 + qman_register_data.val);
9992 + seq_printf(file, "value = 0x%08x\n", b);
9993 +
9994 + return 0;
9995 +}
9996 +
9997 +static int qman_ccsrmempeek_open(struct inode *inode, struct file *file)
9998 +{
9999 + return single_open(file, qman_ccsrmempeek_show, NULL);
10000 +}
10001 +
10002 +static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf,
10003 + size_t count, loff_t *off)
10004 +{
10005 + int ret;
10006 + unsigned long val;
10007 +
10008 + ret = user_input_convert(buf, count, &val);
10009 + if (ret)
10010 + return ret;
10011 +	/* offset must lie within the CCSR window and be a multiple of 4 */
10012 + if (val > (qman_ccsr_size - sizeof(u32))) {
10013 + pr_info("Input 0x%lx > 0x%llx\n",
10014 + val, (qman_ccsr_size - sizeof(u32)));
10015 + return -EINVAL;
10016 + }
10017 + if (val & 0x3) {
10018 + pr_info("Input 0x%lx not multiple of 4\n", val);
10019 + return -EINVAL;
10020 + }
10021 + qman_register_data.val = val;
10022 + return count;
10023 +}
10024 +
10025 +static const struct file_operations qman_ccsrmempeek_fops = {
10026 + .owner = THIS_MODULE,
10027 + .open = qman_ccsrmempeek_open,
10028 + .read = seq_read,
10029 + .write = qman_ccsrmempeek_write,
+	.release = single_release,
10030 +};
10031 +
10032 +/*******************************************************************************
10033 + * QMan state
10034 + ******************************************************************************/
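+/*
+ * List the frame queue ids whose non-programmable state matches the state
+ * this file was created for (passed via i_private), then print a summary
+ * count for every state.
+ */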
10035 +static int qman_fqd_state_show(struct seq_file *file, void *offset)
10036 +{
10037 + struct qm_mcr_queryfq_np np;
10038 + struct qman_fq fq;
10039 + struct line_buffer_fq line_buf;
10040 + int ret, i;
10041 + u8 *state = file->private;
10042 + u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
10043 +
10044 + memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
10045 + memset(&line_buf, 0, sizeof(line_buf));
10046 +
10047 + seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]);
10048 +
10049 + for (i = 1; i < fqid_max; i++) {
10050 + fq.fqid = i;
10051 + ret = qman_query_fq_np(&fq, &np);
10052 + if (ret)
10053 + return ret;
10054 + if (*state == (np.state & QM_MCR_NP_STATE_MASK))
10055 + add_to_line_buffer(&line_buf, fq.fqid, file);
10056 + /* Keep a summary count of all states */
10057 + if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
10058 + qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
10059 + }
10060 + flush_line_buffer(&line_buf, file);
10061 +
10062 + for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
10063 + seq_printf(file, "%s count = %u\n", state_txt[i],
10064 + qm_fq_state_cnt[i]);
10065 + }
10066 + return 0;
10067 +}
10068 +
10069 +static int qman_fqd_state_open(struct inode *inode, struct file *file)
10070 +{
10071 + return single_open(file, qman_fqd_state_show, inode->i_private);
10072 +}
10073 +
10074 +static const struct file_operations qman_fqd_state_fops = {
10075 + .owner = THIS_MODULE,
10076 + .open = qman_fqd_state_open,
10077 + .read = seq_read,
+	.release = single_release,
10078 +};
10079 +
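+/*
+ * List the frame queue ids whose FQD control word has the bit identified by
+ * i_private set (or cleared, depending on the filter), and count both cases.
+ */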
10080 +static int qman_fqd_ctrl_show(struct seq_file *file, void *offset)
10081 +{
10082 + struct qm_fqd fqd;
10083 + struct qman_fq fq;
10084 + u32 fq_en_cnt = 0, fq_di_cnt = 0;
10085 + int ret, i;
10086 + struct mask_filter_s *data = file->private;
10087 + const char *ctrl_txt = get_fqd_ctrl_text(data->mask);
10088 + struct line_buffer_fq line_buf;
10089 +
10090 + memset(&line_buf, 0, sizeof(line_buf));
10091 + seq_printf(file, "List of fq ids with: %s :%s\n",
10092 + ctrl_txt, (data->filter) ? "enabled" : "disabled");
10093 + for (i = 1; i < fqid_max; i++) {
10094 + fq.fqid = i;
10095 + memset(&fqd, 0, sizeof(struct qm_fqd));
10096 + ret = qman_query_fq(&fq, &fqd);
10097 + if (ret)
10098 + return ret;
10099 + if (data->filter) {
10100 + if (fqd.fq_ctrl & data->mask)
10101 + add_to_line_buffer(&line_buf, fq.fqid, file);
10102 + } else {
10103 + if (!(fqd.fq_ctrl & data->mask))
10104 + add_to_line_buffer(&line_buf, fq.fqid, file);
10105 + }
10106 + if (fqd.fq_ctrl & data->mask)
10107 + fq_en_cnt++;
10108 + else
10109 + fq_di_cnt++;
10110 + }
10111 + flush_line_buffer(&line_buf, file);
10112 +
10113 + seq_printf(file, "Total FQD with: %s : enabled = %u\n",
10114 + ctrl_txt, fq_en_cnt);
10115 + seq_printf(file, "Total FQD with: %s : disabled = %u\n",
10116 + ctrl_txt, fq_di_cnt);
10117 + return 0;
10118 +}
10119 +
10120 +/*******************************************************************************
10121 + * QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE
10122 + ******************************************************************************/
10123 +static int qman_fqd_ctrl_open(struct inode *inode, struct file *file)
10124 +{
10125 + return single_open(file, qman_fqd_ctrl_show, inode->i_private);
10126 +}
10127 +
10128 +static const struct file_operations qman_fqd_ctrl_fops = {
10129 + .owner = THIS_MODULE,
10130 + .open = qman_fqd_ctrl_open,
10131 + .read = seq_read,
+	.release = single_release,
10132 +};
10133 +
10134 +/*******************************************************************************
10135 + * QMan FQD summary (state and ctrl)
10136 + ******************************************************************************/
10140 +static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset)
10141 +{
10142 + struct qm_mcr_queryfq_np np;
10143 + struct qman_fq fq;
10144 + int ret, i;
10145 + u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
10146 +
10147 + memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
10148 +
10149 + for (i = 1; i < fqid_max; i++) {
10150 + fq.fqid = i;
10151 + ret = qman_query_fq_np(&fq, &np);
10152 + if (ret)
10153 + return ret;
10154 + /* Keep a summary count of all states */
10155 + if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
10156 + qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
10157 + }
10158 +
10159 + for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
10160 + seq_printf(file, "%s count = %u\n", state_txt[i],
10161 + qm_fq_state_cnt[i]);
10162 + }
10163 + return 0;
10164 +}
10165 +
10166 +static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset)
10167 +{
10168 + struct qm_fqd fqd;
10169 + struct qman_fq fq;
10170 +	int ret, i, j;
10171 + u32 qm_prog_cnt[ARRAY_SIZE(mask_filter)/2];
10172 +
10173 + memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt));
10174 +
10175 + for (i = 1; i < fqid_max; i++) {
10176 + memset(&fqd, 0, sizeof(struct qm_fqd));
10177 + fq.fqid = i;
10178 + ret = qman_query_fq(&fq, &fqd);
10179 + if (ret)
10180 + return ret;
10181 + /* Keep a summary count of all states */
10182 + for (j = 0; j < ARRAY_SIZE(mask_filter); j += 2)
10183 + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
10184 + mask_filter[j].mask)
10185 + qm_prog_cnt[j/2]++;
10186 + }
10187 + for (i = 0; i < ARRAY_SIZE(mask_filter) / 2; i++) {
10188 + seq_printf(file, "%s count = %u\n",
10189 + get_fqd_ctrl_text(mask_filter[i*2].mask),
10190 + qm_prog_cnt[i]);
10191 + }
10192 + return 0;
10193 +}
10194 +
10195 +static int qman_fqd_summary_show(struct seq_file *file, void *offset)
10196 +{
10197 + int ret;
10198 +
10199 + /* Display summary of non programmable fields */
10200 + ret = qman_fqd_non_prog_summary_show(file, offset);
10201 + if (ret)
10202 + return ret;
10203 + seq_puts(file, "-----------------------------------------\n");
10204 + /* Display programmable fields */
10205 + ret = qman_fqd_prog_summary_show(file, offset);
10206 + if (ret)
10207 + return ret;
10208 + return 0;
10209 +}
10210 +
10211 +static int qman_fqd_summary_open(struct inode *inode, struct file *file)
10212 +{
10213 + return single_open(file, qman_fqd_summary_show, NULL);
10214 +}
10215 +
10216 +static const struct file_operations qman_fqd_summary_fops = {
10217 + .owner = THIS_MODULE,
10218 + .open = qman_fqd_summary_open,
10219 + .read = seq_read,
+	.release = single_release,
10220 +};
10221 +
10222 +/*******************************************************************************
10223 + * QMan destination work queue
10224 + ******************************************************************************/
10225 +struct qman_dest_wq_s {
10226 + u16 wq_id;
10227 +};
10228 +static struct qman_dest_wq_s qman_dest_wq_data = {
10229 + .wq_id = 0,
10230 +};
10231 +
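+/*
+ * List the frame queue ids whose destination work queue field equals the id
+ * written to this file, then summarise how many FQDs use each value (the
+ * channel is the value shifted right by three bits).
+ */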
10232 +static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset)
10233 +{
10234 + struct qm_fqd fqd;
10235 + struct qman_fq fq;
10236 + int ret, i;
10237 + u16 *wq, wq_id = qman_dest_wq_data.wq_id;
10238 + struct line_buffer_fq line_buf;
10239 +
10240 + memset(&line_buf, 0, sizeof(line_buf));
10241 +	/* Use vmalloc: we need a large allocation that does not have to be
10242 +	 * physically contiguous. */
10243 + wq = vzalloc(sizeof(u16) * (0xFFFF+1));
10244 + if (!wq)
10245 + return -ENOMEM;
10246 +
10247 + seq_printf(file, "List of fq ids with destination work queue id"
10248 + " = 0x%x\n", wq_id);
10249 +
10250 + for (i = 1; i < fqid_max; i++) {
10251 + fq.fqid = i;
10252 + memset(&fqd, 0, sizeof(struct qm_fqd));
10253 + ret = qman_query_fq(&fq, &fqd);
10254 + if (ret) {
10255 + vfree(wq);
10256 + return ret;
10257 + }
10258 + if (wq_id == fqd.dest_wq)
10259 + add_to_line_buffer(&line_buf, fq.fqid, file);
10260 + wq[fqd.dest_wq]++;
10261 + }
10262 + flush_line_buffer(&line_buf, file);
10263 +
10264 + seq_puts(file, "Summary of all FQD destination work queue values\n");
10265 +	for (i = 0; i <= 0xFFFF; i++) {
10266 + if (wq[i])
10267 + seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, "
10268 + "count = %u\n", i >> 3, i & 0x3, i, wq[i]);
10269 + }
10270 + vfree(wq);
10271 + return 0;
10272 +}
10273 +
10274 +static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf,
10275 + size_t count, loff_t *off)
10276 +{
10277 + int ret;
10278 + unsigned long val;
10279 +
10280 + ret = user_input_convert(buf, count, &val);
10281 + if (ret)
10282 + return ret;
10283 + if (val > 0xFFFF)
10284 + return -EINVAL;
10285 + qman_dest_wq_data.wq_id = val;
10286 + return count;
10287 +}
10288 +
10289 +static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file)
10290 +{
10291 + return single_open(file, qman_fqd_dest_wq_show, NULL);
10292 +}
10293 +
10294 +static const struct file_operations qman_fqd_dest_wq_fops = {
10295 + .owner = THIS_MODULE,
10296 + .open = qman_fqd_dest_wq_open,
10297 + .read = seq_read,
10298 + .write = qman_fqd_dest_wq_write,
+	.release = single_release,
10299 +};
10300 +
10301 +/*******************************************************************************
10302 + * QMan Intra-Class Scheduling Credit
10303 + ******************************************************************************/
10304 +static int qman_fqd_cred_show(struct seq_file *file, void *offset)
10305 +{
10306 + struct qm_fqd fqd;
10307 + struct qman_fq fq;
10308 + int ret, i;
10309 + u32 fq_cnt = 0;
10310 + struct line_buffer_fq line_buf;
10311 +
10312 + memset(&line_buf, 0, sizeof(line_buf));
10313 + seq_puts(file, "List of fq ids with Intra-Class Scheduling Credit > 0"
10314 + "\n");
10315 +
10316 + for (i = 1; i < fqid_max; i++) {
10317 + fq.fqid = i;
10318 + memset(&fqd, 0, sizeof(struct qm_fqd));
10319 + ret = qman_query_fq(&fq, &fqd);
10320 + if (ret)
10321 + return ret;
10322 + if (fqd.ics_cred > 0) {
10323 + add_to_line_buffer(&line_buf, fq.fqid, file);
10324 + fq_cnt++;
10325 + }
10326 + }
10327 + flush_line_buffer(&line_buf, file);
10328 +
10329 + seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt);
10330 + return 0;
10331 +}
10332 +
10333 +static int qman_fqd_cred_open(struct inode *inode, struct file *file)
10334 +{
10335 + return single_open(file, qman_fqd_cred_show, NULL);
10336 +}
10337 +
10338 +static const struct file_operations qman_fqd_cred_fops = {
10339 + .owner = THIS_MODULE,
10340 + .open = qman_fqd_cred_open,
10341 + .read = seq_read,
+	.release = single_release,
10342 +};
10343 +
10344 +/*******************************************************************************
10345 + * Class Queue Fields
10346 + ******************************************************************************/
10347 +struct query_cq_fields_data_s {
10348 + u32 cqid;
10349 +};
10350 +
10351 +static struct query_cq_fields_data_s query_cq_fields_data = {
10352 + .cqid = 1,
10353 +};
10354 +
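+/*
+ * The value written to "query_cq_fields" packs the target in one word:
+ * bits 31:24 select the DCP portal and bits 23:0 the class queue id.
+ */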
10355 +static int query_cq_fields_show(struct seq_file *file, void *offset)
10356 +{
10357 + int ret;
10358 + struct qm_mcr_ceetm_cq_query query_result;
10359 + unsigned int cqid;
10360 + unsigned int portal;
10361 +
10362 + if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
10363 + return -EINVAL;
10364 +
10365 + cqid = query_cq_fields_data.cqid & 0x00FFFFFF;
10366 + portal = query_cq_fields_data.cqid >> 24;
10367 + if (portal > qm_dc_portal_fman1)
10368 + return -EINVAL;
10369 +
10370 + ret = qman_ceetm_query_cq(cqid, portal, &query_result);
10371 + if (ret)
10372 + return ret;
10373 + seq_printf(file, "Query CQ Fields Result cqid 0x%x on DCP %d\n",
10374 + cqid, portal);
10375 + seq_printf(file, " ccgid: %u\n", query_result.ccgid);
10376 + seq_printf(file, " state: %u\n", query_result.state);
10377 + seq_printf(file, " pfdr_hptr: %u\n", query_result.pfdr_hptr);
10378 + seq_printf(file, " pfdr_tptr: %u\n", query_result.pfdr_tptr);
10379 + seq_printf(file, " od1_xsfdr: %u\n", query_result.od1_xsfdr);
10380 + seq_printf(file, " od2_xsfdr: %u\n", query_result.od2_xsfdr);
10381 + seq_printf(file, " od3_xsfdr: %u\n", query_result.od3_xsfdr);
10382 + seq_printf(file, " od4_xsfdr: %u\n", query_result.od4_xsfdr);
10383 + seq_printf(file, " od5_xsfdr: %u\n", query_result.od5_xsfdr);
10384 + seq_printf(file, " od6_xsfdr: %u\n", query_result.od6_xsfdr);
10385 + seq_printf(file, " ra1_xsfdr: %u\n", query_result.ra1_xsfdr);
10386 + seq_printf(file, " ra2_xsfdr: %u\n", query_result.ra2_xsfdr);
10387 + seq_printf(file, " frame_count: %u\n", query_result.frm_cnt);
10388 +
10389 + return 0;
10390 +}
10391 +
10392 +static int query_cq_fields_open(struct inode *inode,
10393 + struct file *file)
10394 +{
10395 + return single_open(file, query_cq_fields_show, NULL);
10396 +}
10397 +
10398 +static ssize_t query_cq_fields_write(struct file *f,
10399 + const char __user *buf, size_t count, loff_t *off)
10400 +{
10401 + int ret;
10402 + unsigned long val;
10403 +
10404 + ret = user_input_convert(buf, count, &val);
10405 + if (ret)
10406 + return ret;
10407 + query_cq_fields_data.cqid = (u32)val;
10408 + return count;
10409 +}
10410 +
10411 +static const struct file_operations query_cq_fields_fops = {
10412 + .owner = THIS_MODULE,
10413 + .open = query_cq_fields_open,
10414 + .read = seq_read,
10415 + .write = query_cq_fields_write,
10416 + .release = single_release,
10417 +};
10418 +
10419 +/*******************************************************************************
10420 + * READ CEETM_XSFDR_IN_USE
10421 + ******************************************************************************/
10422 +struct query_ceetm_xsfdr_data_s {
10423 + enum qm_dc_portal dcp_portal;
10424 +};
10425 +
10426 +static struct query_ceetm_xsfdr_data_s query_ceetm_xsfdr_data;
10427 +
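+/* The value written to "query_ceetm_xsfdr_in_use" selects the DCP portal. */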
10428 +static int query_ceetm_xsfdr_show(struct seq_file *file, void *offset)
10429 +{
10430 + int ret;
10431 + unsigned int xsfdr_in_use;
10432 + enum qm_dc_portal portal;
10433 +
10434 +
10436 + return -EINVAL;
10437 +
10438 + portal = query_ceetm_xsfdr_data.dcp_portal;
10439 + ret = qman_ceetm_get_xsfdr(portal, &xsfdr_in_use);
10440 + if (ret) {
10441 + seq_printf(file, "Read CEETM_XSFDR_IN_USE on DCP %d failed\n",
10442 + portal);
10443 + return ret;
10444 + }
10445 +
10446 + seq_printf(file, "DCP%d: CEETM_XSFDR_IN_USE number is %u\n", portal,
10447 + (xsfdr_in_use & 0x1FFF));
10448 + return 0;
10449 +}
10450 +
10451 +static int query_ceetm_xsfdr_open(struct inode *inode,
10452 + struct file *file)
10453 +{
10454 + return single_open(file, query_ceetm_xsfdr_show, NULL);
10455 +}
10456 +
10457 +static ssize_t query_ceetm_xsfdr_write(struct file *f,
10458 + const char __user *buf, size_t count, loff_t *off)
10459 +{
10460 + int ret;
10461 + unsigned long val;
10462 +
10463 + ret = user_input_convert(buf, count, &val);
10464 + if (ret)
10465 + return ret;
10466 + if (val > qm_dc_portal_fman1)
10467 + return -EINVAL;
10468 + query_ceetm_xsfdr_data.dcp_portal = (u32)val;
10469 + return count;
10470 +}
10471 +
10472 +static const struct file_operations query_ceetm_xsfdr_fops = {
10473 + .owner = THIS_MODULE,
10474 + .open = query_ceetm_xsfdr_open,
10475 + .read = seq_read,
10476 + .write = query_ceetm_xsfdr_write,
10477 + .release = single_release,
10478 +};
10479 +
10480 +/* helper macros used in qman_debugfs_module_init */
10481 +#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \
10482 + do { \
10483 + d = debugfs_create_file(name, \
10484 + mode, parent, \
10485 + data, \
10486 + fops); \
10487 + if (d == NULL) { \
10488 + ret = -ENOMEM; \
10489 + goto _return; \
10490 + } \
10491 + } while (0)
10492 +
10493 +/* dfs_root as parent */
10494 +#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \
10495 + QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops)
10496 +
10497 +/* fqd_root as parent */
10498 +#define QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \
10499 + QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops)
10500 +
10501 +/* fqd state */
10502 +#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \
10503 + QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \
10504 + (void *)&mask_filter[index], &qman_fqd_ctrl_fops)
10505 +
10506 +static int __init qman_debugfs_module_init(void)
10507 +{
10508 + int ret = 0;
10509 + struct dentry *d, *fqd_root;
10510 + u32 reg;
10511 +
10512 + fqid_max = 0;
10513 + init_ccsrmempeek();
10514 + if (qman_ccsr_start) {
10515 + if (!qman_ccsrmempeek(&reg, QM_FQD_AR)) {
10516 + /* extract the size of the FQD window */
10517 + reg = reg & 0x3f;
10518 + /* calculate valid frame queue descriptor range */
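+			/* (the window is 2^(reg+1) bytes and each descriptor
+			 * occupies QM_FQD_BLOCK_SIZE bytes) */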
10519 + fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE;
10520 + }
10521 + }
10522 + dfs_root = debugfs_create_dir("qman", NULL);
10523 + fqd_root = debugfs_create_dir("fqd", dfs_root);
10524 + if (dfs_root == NULL || fqd_root == NULL) {
10525 + ret = -ENOMEM;
10526 + pr_err("Cannot create qman/fqd debugfs dir\n");
10527 + goto _return;
10528 + }
10529 + if (fqid_max) {
10530 + QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO,
10531 + NULL, &qman_ccsrmempeek_fops);
10532 + }
10533 + QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO,
10534 + &query_fq_np_fields_data, &query_fq_np_fields_fops);
10535 +
10536 + QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO,
10537 + &query_fq_fields_data, &query_fq_fields_fops);
10538 +
10539 + QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO,
10540 + &query_wq_lengths_data, &query_wq_lengths_fops);
10541 +
10542 + QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO,
10543 + &query_cgr_data, &query_cgr_fops);
10544 +
10545 + QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO,
10546 + NULL, &query_congestion_fops);
10547 +
10548 + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO,
10549 + NULL, &testwrite_cgr_fops);
10550 +
10551 + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO,
10552 +		NULL, &testwrite_cgr_cgrid_fops);
10553 +
10554 + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO,
10555 +		NULL, &testwrite_cgr_ibcnt_fops);
10556 +
10557 + QMAN_DBGFS_ENTRY_ROOT("query_ceetm_ccgr", S_IRUGO | S_IWUGO,
10558 + &query_ccgr_data, &query_ccgr_fops);
10559 + /* Create files with fqd_root as parent */
10560 +
10561 + QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO,
10562 + (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops);
10563 +
10564 + QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO,
10565 + (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED],
10566 + &qman_fqd_state_fops);
10567 +
10568 + QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO,
10569 + (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED],
10570 + &qman_fqd_state_fops);
10571 +
10572 + QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO,
10573 + (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED],
10574 + &qman_fqd_state_fops);
10575 +
10576 + QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO,
10577 + (void *)&fqd_states[QM_MCR_NP_STATE_PARKED],
10578 + &qman_fqd_state_fops);
10579 +
10580 + QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO,
10581 + (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE],
10582 + &qman_fqd_state_fops);
10583 + QMAN_DBGFS_ENTRY_ROOT("query_cq_fields", S_IRUGO | S_IWUGO,
10584 + &query_cq_fields_data, &query_cq_fields_fops);
10585 + QMAN_DBGFS_ENTRY_ROOT("query_ceetm_xsfdr_in_use", S_IRUGO | S_IWUGO,
10586 + &query_ceetm_xsfdr_data, &query_ceetm_xsfdr_fops);
10587 +
10589 + QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17);
10590 +
10591 + QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16);
10592 +
10593 + QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15);
10594 +
10595 + QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14);
10596 +
10597 + QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13);
10598 +
10599 + QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12);
10600 +
10601 + QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11);
10602 +
10603 + QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10);
10604 +
10605 + QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9);
10606 +
10607 + QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8);
10608 +
10609 + QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7);
10610 +
10611 + QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6);
10612 +
10613 + QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5);
10614 +
10615 + QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4);
10616 +
10617 + QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3);
10618 +
10619 + QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2);
10620 +
10621 + QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1);
10622 +
10623 + QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0);
10624 +
10625 + QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO,
10626 + NULL, &qman_fqd_summary_fops);
10627 +
10628 + QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO,
10629 + NULL, &qman_fqd_dest_wq_fops);
10630 +
10631 + QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO,
10632 + NULL, &qman_fqd_cred_fops);
10633 +
10634 + return 0;
10635 +
10636 +_return:
10637 + debugfs_remove_recursive(dfs_root);
10638 + return ret;
10639 +}
10640 +
10641 +static void __exit qman_debugfs_module_exit(void)
10642 +{
10643 + debugfs_remove_recursive(dfs_root);
10644 +}
10645 +
10646 +module_init(qman_debugfs_module_init);
10647 +module_exit(qman_debugfs_module_exit);
10648 +MODULE_LICENSE("Dual BSD/GPL");
10649 --- /dev/null
10650 +++ b/drivers/staging/fsl_qbman/qman_driver.c
10651 @@ -0,0 +1,961 @@
10652 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
10653 + *
10654 + * Redistribution and use in source and binary forms, with or without
10655 + * modification, are permitted provided that the following conditions are met:
10656 + * * Redistributions of source code must retain the above copyright
10657 + * notice, this list of conditions and the following disclaimer.
10658 + * * Redistributions in binary form must reproduce the above copyright
10659 + * notice, this list of conditions and the following disclaimer in the
10660 + * documentation and/or other materials provided with the distribution.
10661 + * * Neither the name of Freescale Semiconductor nor the
10662 + * names of its contributors may be used to endorse or promote products
10663 + * derived from this software without specific prior written permission.
10664 + *
10665 + *
10666 + * ALTERNATIVELY, this software may be distributed under the terms of the
10667 + * GNU General Public License ("GPL") as published by the Free Software
10668 + * Foundation, either version 2 of that License or (at your option) any
10669 + * later version.
10670 + *
10671 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
10672 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
10673 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
10674 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
10675 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
10676 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
10677 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
10678 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
10679 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10680 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10681 + */
10682 +
10683 +#include "qman_private.h"
10684 +
10685 +#include <asm/smp.h> /* hard_smp_processor_id() if !CONFIG_SMP */
10686 +#ifdef CONFIG_HOTPLUG_CPU
10687 +#include <linux/cpu.h>
10688 +#endif
10689 +
10690 +/* Global variable containing revision id (even on non-control plane systems
10691 + * where CCSR isn't available) */
10692 +u16 qman_ip_rev;
10693 +EXPORT_SYMBOL(qman_ip_rev);
10694 +u8 qman_ip_cfg;
10695 +EXPORT_SYMBOL(qman_ip_cfg);
10696 +u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
10697 +EXPORT_SYMBOL(qm_channel_pool1);
10698 +u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
10699 +EXPORT_SYMBOL(qm_channel_caam);
10700 +u16 qm_channel_pme = QMAN_CHANNEL_PME;
10701 +EXPORT_SYMBOL(qm_channel_pme);
10702 +u16 qm_channel_dce = QMAN_CHANNEL_DCE;
10703 +EXPORT_SYMBOL(qm_channel_dce);
10704 +u16 qman_portal_max;
10705 +EXPORT_SYMBOL(qman_portal_max);
10706 +
10707 +u32 qman_clk;
10708 +struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
10709 +/* the qman ceetm instances on the given SoC */
10710 +u8 num_ceetms;
10711 +
10712 +/* For these variables, and the portal-initialisation logic, the
10713 + * comments in bman_driver.c apply here and are not repeated. */
10714 +static struct qman_portal *shared_portals[NR_CPUS];
10715 +static int num_shared_portals;
10716 +static int shared_portals_idx;
10717 +static LIST_HEAD(unused_pcfgs);
10718 +static DEFINE_SPINLOCK(unused_pcfgs_lock);
10719 +
10720 +/* A SDQCR mask comprising all the available/visible pool channels */
10721 +static u32 pools_sdqcr;
10722 +
10723 +#define STR_ERR_NOPROP "No '%s' property in node %s\n"
10724 +#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
10725 +#define STR_FQID_RANGE "fsl,fqid-range"
10726 +#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
10727 +#define STR_CGRID_RANGE "fsl,cgrid-range"
10728 +
10729 +/* A "fsl,fqid-range" node; release the given range to the allocator */
10730 +static __init int fsl_fqid_range_init(struct device_node *node)
10731 +{
10732 + int ret;
10733 + const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
10734 + if (!range) {
10735 + pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
10736 + return -EINVAL;
10737 + }
10738 + if (ret != 8) {
10739 + pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
10740 + return -EINVAL;
10741 + }
10742 + qman_seed_fqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10743 + pr_info("Qman: FQID allocator includes range %d:%d\n",
10744 + be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10745 + return 0;
10746 +}
10747 +
10748 +/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
10749 +static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
10750 +{
10751 + int ret;
10752 + const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
10753 + if (!chanid) {
10754 + pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
10755 + return -EINVAL;
10756 + }
10757 + if (ret != 8) {
10758 +		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
10759 + return -EINVAL;
10760 + }
10761 + for (ret = 0; ret < be32_to_cpu(chanid[1]); ret++)
10762 + pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(be32_to_cpu(chanid[0]) + ret);
10763 + return 0;
10764 +}
10765 +
10766 +/* A "fsl,pool-channel-range" node; release the given range to the allocator */
10767 +static __init int fsl_pool_channel_range_init(struct device_node *node)
10768 +{
10769 + int ret;
10770 + const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
10771 + if (!chanid) {
10772 + pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
10773 + return -EINVAL;
10774 + }
10775 + if (ret != 8) {
10776 +		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
10777 + return -EINVAL;
10778 + }
10779 + qman_seed_pool_range(be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
10780 + pr_info("Qman: pool channel allocator includes range %d:%d\n",
10781 + be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
10782 + return 0;
10783 +}
10784 +
10785 +/* A "fsl,cgrid-range" node; release the given range to the allocator */
10786 +static __init int fsl_cgrid_range_init(struct device_node *node)
10787 +{
10788 + struct qman_cgr cgr;
10789 + int ret, errors = 0;
10790 + const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
10791 + if (!range) {
10792 + pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
10793 + return -EINVAL;
10794 + }
10795 + if (ret != 8) {
10796 + pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
10797 + return -EINVAL;
10798 + }
10799 + qman_seed_cgrid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10800 + pr_info("Qman: CGRID allocator includes range %d:%d\n",
10801 + be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10802 + for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
10803 + ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
10804 + if (ret)
10805 + errors++;
10806 + }
10807 + if (errors)
10808 +		pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
10809 +			errors, (errors > 1) ? "s" : "",
+			be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10810 + return 0;
10811 +}
10812 +
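+/* A "fsl,qman-ceetm" node; seed the LFQID and channel allocators, record the
+ * sub-portal and LNI ranges for this DCP portal and program its prescaler. */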
10813 +static __init int fsl_ceetm_init(struct device_node *node)
10814 +{
10815 + enum qm_dc_portal dcp_portal;
10816 + struct qm_ceetm_sp *sp;
10817 + struct qm_ceetm_lni *lni;
10818 + int ret, i;
10819 + const u32 *range;
10820 +
10821 + /* Find LFQID range */
10822 + range = of_get_property(node, "fsl,ceetm-lfqid-range", &ret);
10823 + if (!range) {
10824 + pr_err("No fsl,ceetm-lfqid-range in node %s\n",
10825 + node->full_name);
10826 + return -EINVAL;
10827 + }
10828 + if (ret != 8) {
10829 + pr_err("fsl,ceetm-lfqid-range is not a 2-cell range in node"
10830 + " %s\n", node->full_name);
10831 + return -EINVAL;
10832 + }
10833 +
10834 + dcp_portal = (be32_to_cpu(range[0]) & 0x0F0000) >> 16;
10835 + if (dcp_portal > qm_dc_portal_fman1) {
10836 + pr_err("The DCP portal %d doesn't support CEETM\n", dcp_portal);
10837 + return -EINVAL;
10838 + }
10839 +
10840 + if (dcp_portal == qm_dc_portal_fman0)
10841 + qman_seed_ceetm0_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10842 + if (dcp_portal == qm_dc_portal_fman1)
10843 + qman_seed_ceetm1_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10844 + pr_debug("Qman: The lfqid allocator of CEETM %d includes range"
10845 + " 0x%x:0x%x\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10846 +
10847 + qman_ceetms[dcp_portal].idx = dcp_portal;
10848 + INIT_LIST_HEAD(&qman_ceetms[dcp_portal].sub_portals);
10849 + INIT_LIST_HEAD(&qman_ceetms[dcp_portal].lnis);
10850 +
10851 + /* Find Sub-portal range */
10852 + range = of_get_property(node, "fsl,ceetm-sp-range", &ret);
10853 + if (!range) {
10854 + pr_err("No fsl,ceetm-sp-range in node %s\n", node->full_name);
10855 + return -EINVAL;
10856 + }
10857 + if (ret != 8) {
10858 + pr_err("fsl,ceetm-sp-range is not a 2-cell range in node %s\n",
10859 + node->full_name);
10860 + return -EINVAL;
10861 + }
10862 +
10863 + for (i = 0; i < be32_to_cpu(range[1]); i++) {
10864 + sp = kzalloc(sizeof(*sp), GFP_KERNEL);
10865 + if (!sp) {
10866 + pr_err("Can't alloc memory for sub-portal %d\n",
10867 +				be32_to_cpu(range[0]) + i);
10868 + return -ENOMEM;
10869 + }
10870 + sp->idx = be32_to_cpu(range[0]) + i;
10871 + sp->dcp_idx = dcp_portal;
10872 + sp->is_claimed = 0;
10873 + list_add_tail(&sp->node, &qman_ceetms[dcp_portal].sub_portals);
10874 + sp++;
10875 + }
10876 + pr_debug("Qman: Reserve sub-portal %d:%d for CEETM %d\n",
10877 + be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
10878 + qman_ceetms[dcp_portal].sp_range[0] = be32_to_cpu(range[0]);
10879 + qman_ceetms[dcp_portal].sp_range[1] = be32_to_cpu(range[1]);
10880 +
10881 + /* Find LNI range */
10882 + range = of_get_property(node, "fsl,ceetm-lni-range", &ret);
10883 + if (!range) {
10884 + pr_err("No fsl,ceetm-lni-range in node %s\n", node->full_name);
10885 + return -EINVAL;
10886 + }
10887 + if (ret != 8) {
10888 + pr_err("fsl,ceetm-lni-range is not a 2-cell range in node %s\n",
10889 + node->full_name);
10890 + return -EINVAL;
10891 + }
10892 +
10893 + for (i = 0; i < be32_to_cpu(range[1]); i++) {
10894 + lni = kzalloc(sizeof(*lni), GFP_KERNEL);
10895 + if (!lni) {
10896 + pr_err("Can't alloc memory for LNI %d\n",
10897 +				be32_to_cpu(range[0]) + i);
10898 + return -ENOMEM;
10899 + }
10900 + lni->idx = be32_to_cpu(range[0]) + i;
10901 + lni->dcp_idx = dcp_portal;
10902 + lni->is_claimed = 0;
10903 + INIT_LIST_HEAD(&lni->channels);
10904 + list_add_tail(&lni->node, &qman_ceetms[dcp_portal].lnis);
10905 + lni++;
10906 + }
10907 + pr_debug("Qman: Reserve LNI %d:%d for CEETM %d\n",
10908 + be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
10909 + qman_ceetms[dcp_portal].lni_range[0] = be32_to_cpu(range[0]);
10910 + qman_ceetms[dcp_portal].lni_range[1] = be32_to_cpu(range[1]);
10911 +
10912 + /* Find CEETM channel range */
10913 + range = of_get_property(node, "fsl,ceetm-channel-range", &ret);
10914 + if (!range) {
10915 + pr_err("No fsl,ceetm-channel-range in node %s\n",
10916 + node->full_name);
10917 + return -EINVAL;
10918 + }
10919 + if (ret != 8) {
10920 + pr_err("fsl,ceetm-channel-range is not a 2-cell range in node"
10921 +			" %s\n", node->full_name);
10922 + return -EINVAL;
10923 + }
10924 +
10925 + if (dcp_portal == qm_dc_portal_fman0)
10926 + qman_seed_ceetm0_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10927 + if (dcp_portal == qm_dc_portal_fman1)
10928 + qman_seed_ceetm1_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10929 + pr_debug("Qman: The channel allocator of CEETM %d includes"
10930 + " range %d:%d\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
10931 +
10932 + /* Set CEETM PRES register */
10933 + ret = qman_ceetm_set_prescaler(dcp_portal);
10934 + if (ret)
10935 + return ret;
10936 + return 0;
10937 +}
10938 +
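+/*
+ * Walk the available "fsl,qman-portal" nodes and derive the QMan IP revision,
+ * configuration variant and maximum portal count from their versioned
+ * compatible strings.
+ */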
10939 +static void qman_get_ip_revision(struct device_node *dn)
10940 +{
10941 + u16 ip_rev = 0;
10942 + u8 ip_cfg = QMAN_REV_CFG_0;
10943 + for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
10944 + if (!of_device_is_available(dn))
10945 + continue;
10946 + if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
10947 + of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
10948 + pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
10949 + BUG_ON(1);
10950 + } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
10951 + of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
10952 + ip_rev = QMAN_REV11;
10953 + qman_portal_max = 10;
10954 + } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
10955 + of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
10956 + ip_rev = QMAN_REV12;
10957 + qman_portal_max = 10;
10958 + } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
10959 + of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
10960 + ip_rev = QMAN_REV20;
10961 + qman_portal_max = 3;
10962 + } else if (of_device_is_compatible(dn,
10963 + "fsl,qman-portal-3.0.0")) {
10964 + ip_rev = QMAN_REV30;
10965 + qman_portal_max = 50;
10966 + } else if (of_device_is_compatible(dn,
10967 + "fsl,qman-portal-3.0.1")) {
10968 + ip_rev = QMAN_REV30;
10969 + qman_portal_max = 25;
10970 + ip_cfg = QMAN_REV_CFG_1;
10971 + } else if (of_device_is_compatible(dn,
10972 + "fsl,qman-portal-3.1.0")) {
10973 + ip_rev = QMAN_REV31;
10974 + qman_portal_max = 50;
10975 + } else if (of_device_is_compatible(dn,
10976 + "fsl,qman-portal-3.1.1")) {
10977 + ip_rev = QMAN_REV31;
10978 + qman_portal_max = 25;
10979 + ip_cfg = QMAN_REV_CFG_1;
10980 + } else if (of_device_is_compatible(dn,
10981 + "fsl,qman-portal-3.1.2")) {
10982 + ip_rev = QMAN_REV31;
10983 + qman_portal_max = 18;
10984 + ip_cfg = QMAN_REV_CFG_2;
10985 + } else if (of_device_is_compatible(dn,
10986 + "fsl,qman-portal-3.1.3")) {
10987 + ip_rev = QMAN_REV31;
10988 + qman_portal_max = 10;
10989 + ip_cfg = QMAN_REV_CFG_3;
10990 + } else if (of_device_is_compatible(dn,
10991 + "fsl,qman-portal-3.2.0")) {
10992 + ip_rev = QMAN_REV32;
10993 + qman_portal_max = 10;
10994 +			ip_cfg = QMAN_REV_CFG_3; /* TODO: verify for ls1043 */
10995 + } else if (of_device_is_compatible(dn,
10996 + "fsl,qman-portal-3.2.1")) {
10997 + ip_rev = QMAN_REV32;
10998 + qman_portal_max = 10;
10999 + ip_cfg = QMAN_REV_CFG_3;
11000 + } else {
11001 + pr_warn("unknown QMan version in portal node,"
11002 +				" default to rev1.1\n");
11003 + ip_rev = QMAN_REV11;
11004 + qman_portal_max = 10;
11005 + }
11006 +
11007 + if (!qman_ip_rev) {
11008 + if (ip_rev) {
11009 + qman_ip_rev = ip_rev;
11010 + qman_ip_cfg = ip_cfg;
11011 + } else {
11012 + pr_warn("unknown Qman version,"
11013 + " default to rev1.1\n");
11014 + qman_ip_rev = QMAN_REV11;
11015 + qman_ip_cfg = QMAN_REV_CFG_0;
11016 + }
11017 + } else if (ip_rev && (qman_ip_rev != ip_rev))
11018 + pr_warn("Revision=0x%04x, but portal '%s' has"
11019 + " 0x%04x\n",
11020 + qman_ip_rev, dn->full_name, ip_rev);
11021 + if (qman_ip_rev == ip_rev)
11022 + break;
11023 + }
11024 +}
11025 +
11026 +/* Parse a portal node, perform generic mapping duties and return the config. It
11027 + * is not known at this stage for what purpose (or even if) the portal will be
11028 + * used. */
11029 +static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
11030 +{
11031 + struct qm_portal_config *pcfg;
11032 + const u32 *index_p;
11033 + u32 index, channel;
11034 + int irq, ret;
11035 + resource_size_t len;
11036 +
11037 + pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
11038 + if (!pcfg) {
11039 +		pr_err("can't allocate portal config\n");
11040 + return NULL;
11041 + }
11042 +
11043 + /*
11044 + * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
11045 + * 'struct device' in order to get the PAMU stashing setup and the QMan
11046 + * portal [driver] won't function at all without ring stashing
11047 + *
11048 + * Making the QMan portal driver nice and proper is part of the
11049 + * upstreaming effort
11050 + */
11051 + pcfg->dev.bus = &platform_bus_type;
11052 + pcfg->dev.of_node = node;
11053 +#ifdef CONFIG_FSL_PAMU
11054 + pcfg->dev.archdata.iommu_domain = NULL;
11055 +#endif
11056 +
11057 + ret = of_address_to_resource(node, DPA_PORTAL_CE,
11058 + &pcfg->addr_phys[DPA_PORTAL_CE]);
11059 + if (ret) {
11060 + pr_err("Can't get %s property '%s'\n", node->full_name,
11061 + "reg::CE");
11062 + goto err;
11063 + }
11064 + ret = of_address_to_resource(node, DPA_PORTAL_CI,
11065 + &pcfg->addr_phys[DPA_PORTAL_CI]);
11066 + if (ret) {
11067 + pr_err("Can't get %s property '%s'\n", node->full_name,
11068 + "reg::CI");
11069 + goto err;
11070 + }
11071 + index_p = of_get_property(node, "cell-index", &ret);
11072 + if (!index_p || (ret != 4)) {
11073 + pr_err("Can't get %s property '%s'\n", node->full_name,
11074 + "cell-index");
11075 + goto err;
11076 + }
11077 + index = be32_to_cpu(*index_p);
11078 + if (index >= qman_portal_max) {
11079 + pr_err("QMan portal index %d is beyond max (%d)\n",
11080 + index, qman_portal_max);
11081 + goto err;
11082 + }
11083 +
11084 + channel = index + QM_CHANNEL_SWPORTAL0;
11085 + pcfg->public_cfg.channel = channel;
11086 + pcfg->public_cfg.cpu = -1;
11087 + irq = irq_of_parse_and_map(node, 0);
11088 + if (irq == 0) {
11089 + pr_err("Can't get %s property '%s'\n", node->full_name,
11090 + "interrupts");
11091 + goto err;
11092 + }
11093 + pcfg->public_cfg.irq = irq;
11094 + pcfg->public_cfg.index = index;
11095 +#ifdef CONFIG_FSL_QMAN_CONFIG
11096 + /* We need the same LIODN offset for all portals */
11097 + qman_liodn_fixup(pcfg->public_cfg.channel);
11098 +#endif
11099 +
11100 + len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
11101 + if (len != (unsigned long)len)
11102 + goto err;
11103 +
11104 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
11105 + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
11106 + pcfg->addr_phys[DPA_PORTAL_CE].start,
11107 + resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
11108 +
11109 + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
11110 + pcfg->addr_phys[DPA_PORTAL_CI].start,
11111 + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
11112 +#else
11113 + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
11114 + pcfg->addr_phys[DPA_PORTAL_CE].start,
11115 + (unsigned long)len,
11116 + 0);
11117 + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
11118 + pcfg->addr_phys[DPA_PORTAL_CI].start,
11119 + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
11120 + _PAGE_GUARDED | _PAGE_NO_CACHE);
11121 +#endif
11122 + return pcfg;
11123 +err:
11124 + kfree(pcfg);
11125 + return NULL;
11126 +}
11127 +
11128 +static struct qm_portal_config *get_pcfg(struct list_head *list)
11129 +{
11130 + struct qm_portal_config *pcfg;
11131 + if (list_empty(list))
11132 + return NULL;
11133 + pcfg = list_entry(list->prev, struct qm_portal_config, list);
11134 + list_del(&pcfg->list);
11135 + return pcfg;
11136 +}
11137 +
11138 +static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx)
11139 +{
11140 + struct qm_portal_config *pcfg;
11141 + if (list_empty(list))
11142 + return NULL;
11143 + list_for_each_entry(pcfg, list, list) {
11144 + if (pcfg->public_cfg.index == idx) {
11145 + list_del(&pcfg->list);
11146 + return pcfg;
11147 + }
11148 + }
11149 + return NULL;
11150 +}
11151 +
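+/*
+ * When built with PAMU support, give the portal its own IOMMU domain with L1
+ * stashing directed at @cpu; when QMan CCSR configuration is available, also
+ * point the portal's stash request queue (SDEST) at @cpu.
+ */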
11152 +static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
11153 +{
11154 +#ifdef CONFIG_FSL_PAMU
11155 + int ret;
11156 + int window_count = 1;
11157 + struct iommu_domain_geometry geom_attr;
11158 + struct pamu_stash_attribute stash_attr;
11159 +
11160 + pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
11161 + if (!pcfg->iommu_domain) {
11162 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
11163 + __func__);
11164 + goto _no_iommu;
11165 + }
11166 + geom_attr.aperture_start = 0;
11167 + geom_attr.aperture_end =
11168 + ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
11169 + geom_attr.force_aperture = true;
11170 + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
11171 + &geom_attr);
11172 + if (ret < 0) {
11173 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
11174 + __func__, ret);
11175 + goto _iommu_domain_free;
11176 + }
11177 + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
11178 + &window_count);
11179 + if (ret < 0) {
11180 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
11181 + __func__, ret);
11182 + goto _iommu_domain_free;
11183 + }
11184 + stash_attr.cpu = cpu;
11185 + stash_attr.cache = PAMU_ATTR_CACHE_L1;
11186 + /* set stash information for the window */
11187 + stash_attr.window = 0;
11188 + ret = iommu_domain_set_attr(pcfg->iommu_domain,
11189 + DOMAIN_ATTR_FSL_PAMU_STASH,
11190 + &stash_attr);
11191 + if (ret < 0) {
11192 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
11193 + __func__, ret);
11194 + goto _iommu_domain_free;
11195 + }
11196 + ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
11197 + IOMMU_READ | IOMMU_WRITE);
11198 + if (ret < 0) {
11199 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
11200 + __func__, ret);
11201 + goto _iommu_domain_free;
11202 + }
11203 + ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
11204 + if (ret < 0) {
11205 + pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
11206 + __func__, ret);
11207 + goto _iommu_domain_free;
11208 + }
11209 + ret = iommu_domain_set_attr(pcfg->iommu_domain,
11210 + DOMAIN_ATTR_FSL_PAMU_ENABLE,
11211 + &window_count);
11212 + if (ret < 0) {
11213 + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
11214 + __func__, ret);
11215 + goto _iommu_detach_device;
11216 + }
11217 +
11218 +_no_iommu:
11219 +#endif
11220 +#ifdef CONFIG_FSL_QMAN_CONFIG
11221 + if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
11222 +#endif
11223 + pr_warn("Failed to set QMan portal's stash request queue\n");
11224 +
11225 + return;
11226 +
11227 +#ifdef CONFIG_FSL_PAMU
11228 +_iommu_detach_device:
11229 + iommu_detach_device(pcfg->iommu_domain, NULL);
11230 +_iommu_domain_free:
11231 + iommu_domain_free(pcfg->iommu_domain);
11232 +#endif
11233 +}
11234 +
11235 +struct qm_portal_config *qm_get_unused_portal_idx(u32 idx)
11236 +{
11237 + struct qm_portal_config *ret;
11238 + spin_lock(&unused_pcfgs_lock);
11239 + if (idx == QBMAN_ANY_PORTAL_IDX)
11240 + ret = get_pcfg(&unused_pcfgs);
11241 + else
11242 + ret = get_pcfg_idx(&unused_pcfgs, idx);
11243 + spin_unlock(&unused_pcfgs_lock);
11244 + /* Bind stashing LIODNs to the CPU we are currently executing on, and
11245 +	 * set the portal to use the stashing request queue corresponding to the
11246 + * cpu as well. The user-space driver assumption is that the pthread has
11247 + * to already be affine to one cpu only before opening a portal. If that
11248 + * check is circumvented, the only risk is a performance degradation -
11249 + * stashing will go to whatever cpu they happened to be running on when
11250 + * opening the device file, and if that isn't the cpu they subsequently
11251 + * bind to and do their polling on, tough. */
11252 + if (ret)
11253 + portal_set_cpu(ret, hard_smp_processor_id());
11254 + return ret;
11255 +}
11256 +
11257 +struct qm_portal_config *qm_get_unused_portal(void)
11258 +{
11259 + return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
11260 +}
11261 +
11262 +void qm_put_unused_portal(struct qm_portal_config *pcfg)
11263 +{
11264 + spin_lock(&unused_pcfgs_lock);
11265 + list_add(&pcfg->list, &unused_pcfgs);
11266 + spin_unlock(&unused_pcfgs_lock);
11267 +}
11268 +
11269 +static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
11270 +{
11271 + struct qman_portal *p;
11272 +
11273 + pcfg->iommu_domain = NULL;
11274 + portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
11275 + p = qman_create_affine_portal(pcfg, NULL);
11276 + if (p) {
11277 + u32 irq_sources = 0;
11278 + /* Determine what should be interrupt-vs-poll driven */
11279 +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
11280 + irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
11281 + QM_PIRQ_CSCI | QM_PIRQ_CCSCI;
11282 +#endif
11283 +#ifdef CONFIG_FSL_DPA_PIRQ_FAST
11284 + irq_sources |= QM_PIRQ_DQRI;
11285 +#endif
11286 + qman_p_irqsource_add(p, irq_sources);
11287 + pr_info("Qman portal %sinitialised, cpu %d\n",
11288 + pcfg->public_cfg.is_shared ? "(shared) " : "",
11289 + pcfg->public_cfg.cpu);
11290 + } else
11291 + pr_crit("Qman portal failure on cpu %d\n",
11292 + pcfg->public_cfg.cpu);
11293 + return p;
11294 +}
11295 +
11296 +static void init_slave(int cpu)
11297 +{
11298 + struct qman_portal *p;
11299 + struct cpumask oldmask = current->cpus_allowed;
11300 + set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
11301 + p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
11302 + if (!p)
11303 + pr_err("Qman slave portal failure on cpu %d\n", cpu);
11304 + else
11305 + pr_info("Qman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
11306 + set_cpus_allowed_ptr(current, &oldmask);
11307 + if (shared_portals_idx >= num_shared_portals)
11308 + shared_portals_idx = 0;
11309 +}
11310 +
11311 +static struct cpumask want_unshared __initdata;
11312 +static struct cpumask want_shared __initdata;
11313 +
11314 +static int __init parse_qportals(char *str)
11315 +{
11316 + return parse_portals_bootarg(str, &want_shared, &want_unshared,
11317 + "qportals");
11318 +}
11319 +__setup("qportals=", parse_qportals);
11320 +
11321 +static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
11322 + unsigned int cpu)
11323 +{
11324 +#ifdef CONFIG_FSL_PAMU
11325 + struct pamu_stash_attribute stash_attr;
11326 + int ret;
11327 +
11328 + if (pcfg->iommu_domain) {
11329 + stash_attr.cpu = cpu;
11330 + stash_attr.cache = PAMU_ATTR_CACHE_L1;
11331 + /* set stash information for the window */
11332 + stash_attr.window = 0;
11333 + ret = iommu_domain_set_attr(pcfg->iommu_domain,
11334 + DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
11335 + if (ret < 0) {
11336 + pr_err("Failed to update pamu stash setting\n");
11337 + return;
11338 + }
11339 + }
11340 +#endif
11341 +#ifdef CONFIG_FSL_QMAN_CONFIG
11342 + if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
11343 + pr_warn("Failed to update portal's stash request queue\n");
11344 +#endif
11345 +}
11346 +
11347 +static int qman_offline_cpu(unsigned int cpu)
11348 +{
11349 + struct qman_portal *p;
11350 + const struct qm_portal_config *pcfg;
11351 + p = (struct qman_portal *)affine_portals[cpu];
11352 + if (p) {
11353 + pcfg = qman_get_qm_portal_config(p);
11354 + if (pcfg) {
11355 + irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
11356 + qman_portal_update_sdest(pcfg, 0);
11357 + }
11358 + }
11359 + return 0;
11360 +}
11361 +
11362 +#ifdef CONFIG_HOTPLUG_CPU
11363 +static int qman_online_cpu(unsigned int cpu)
11364 +{
11365 + struct qman_portal *p;
11366 + const struct qm_portal_config *pcfg;
11367 + p = (struct qman_portal *)affine_portals[cpu];
11368 + if (p) {
11369 + pcfg = qman_get_qm_portal_config(p);
11370 + if (pcfg) {
11371 + irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
11372 + qman_portal_update_sdest(pcfg, cpu);
11373 + }
11374 + }
11375 + return 0;
11376 +}
11377 +
11378 +#endif /* CONFIG_HOTPLUG_CPU */
11379 +
11380 +__init int qman_init(void)
11381 +{
11382 + struct cpumask slave_cpus;
11383 + struct cpumask unshared_cpus = *cpu_none_mask;
11384 + struct cpumask shared_cpus = *cpu_none_mask;
11385 + LIST_HEAD(unshared_pcfgs);
11386 + LIST_HEAD(shared_pcfgs);
11387 + struct device_node *dn;
11388 + struct qm_portal_config *pcfg;
11389 + struct qman_portal *p;
11390 + int cpu, ret;
11391 + const u32 *clk;
11392 + struct cpumask offline_cpus;
11393 +
11394 + /* Initialise the Qman (CCSR) device */
11395 + for_each_compatible_node(dn, NULL, "fsl,qman") {
11396 + if (!qman_init_ccsr(dn))
11397 + pr_info("Qman err interrupt handler present\n");
11398 + else
11399 + pr_err("Qman CCSR setup failed\n");
11400 +
11401 + clk = of_get_property(dn, "clock-frequency", NULL);
11402 + if (!clk)
11403 + pr_warn("Can't find Qman clock frequency\n");
11404 + else
11405 + qman_clk = be32_to_cpu(*clk);
11406 + }
11407 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
11408 + /* Setup lookup table for FQ demux */
11409 + ret = qman_setup_fq_lookup_table(get_qman_fqd_size()/64);
11410 + if (ret)
11411 + return ret;
11412 +#endif
11413 +
11414 + /* Get qman ip revision */
11415 + qman_get_ip_revision(dn);
11416 + if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
11417 + qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
11418 + qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
11419 + qm_channel_pme = QMAN_CHANNEL_PME_REV3;
11420 + }
11421 +
11422 + if ((qman_ip_rev == QMAN_REV31) && (qman_ip_cfg == QMAN_REV_CFG_2))
11423 + qm_channel_dce = QMAN_CHANNEL_DCE_QMANREV312;
11424 +
11425 + /*
11426 + * Parse the ceetm node to get how many ceetm instances are supported
11427 + * on the current silicon. num_ceetms must be confirmed before portals
11428 +	 * are initialised.
11429 + */
11430 + num_ceetms = 0;
11431 + for_each_compatible_node(dn, NULL, "fsl,qman-ceetm")
11432 + num_ceetms++;
11433 +
11434 + /* Parse pool channels into the SDQCR mask. (Must happen before portals
11435 + * are initialised.) */
11436 + for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
11437 + ret = fsl_pool_channel_range_sdqcr(dn);
11438 + if (ret)
11439 + return ret;
11440 + }
11441 +
11442 + memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
11443 + /* Initialise portals. See bman_driver.c for comments */
11444 + for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
11445 + if (!of_device_is_available(dn))
11446 + continue;
11447 + pcfg = parse_pcfg(dn);
11448 + if (pcfg) {
11449 + pcfg->public_cfg.pools = pools_sdqcr;
11450 + list_add_tail(&pcfg->list, &unused_pcfgs);
11451 + }
11452 + }
11453 + for_each_possible_cpu(cpu) {
11454 + if (cpumask_test_cpu(cpu, &want_shared)) {
11455 + pcfg = get_pcfg(&unused_pcfgs);
11456 + if (!pcfg)
11457 + break;
11458 + pcfg->public_cfg.cpu = cpu;
11459 + list_add_tail(&pcfg->list, &shared_pcfgs);
11460 + cpumask_set_cpu(cpu, &shared_cpus);
11461 + }
11462 + if (cpumask_test_cpu(cpu, &want_unshared)) {
11463 + if (cpumask_test_cpu(cpu, &shared_cpus))
11464 + continue;
11465 + pcfg = get_pcfg(&unused_pcfgs);
11466 + if (!pcfg)
11467 + break;
11468 + pcfg->public_cfg.cpu = cpu;
11469 + list_add_tail(&pcfg->list, &unshared_pcfgs);
11470 + cpumask_set_cpu(cpu, &unshared_cpus);
11471 + }
11472 + }
11473 + if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
11474 + for_each_online_cpu(cpu) {
11475 + pcfg = get_pcfg(&unused_pcfgs);
11476 + if (!pcfg)
11477 + break;
11478 + pcfg->public_cfg.cpu = cpu;
11479 + list_add_tail(&pcfg->list, &unshared_pcfgs);
11480 + cpumask_set_cpu(cpu, &unshared_cpus);
11481 + }
11482 + }
11483 + cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
11484 + cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
11485 + if (cpumask_empty(&slave_cpus)) {
11486 + if (!list_empty(&shared_pcfgs)) {
11487 + cpumask_or(&unshared_cpus, &unshared_cpus,
11488 + &shared_cpus);
11489 + cpumask_clear(&shared_cpus);
11490 + list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
11491 + INIT_LIST_HEAD(&shared_pcfgs);
11492 + }
11493 + } else {
11494 + if (list_empty(&shared_pcfgs)) {
11495 + pcfg = get_pcfg(&unshared_pcfgs);
11496 + if (!pcfg) {
11497 + pr_crit("No QMan portals available!\n");
11498 + return 0;
11499 + }
11500 + cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
11501 + cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
11502 + list_add_tail(&pcfg->list, &shared_pcfgs);
11503 + }
11504 + }
11505 + list_for_each_entry(pcfg, &unshared_pcfgs, list) {
11506 + pcfg->public_cfg.is_shared = 0;
11507 + p = init_pcfg(pcfg);
11508 + if (!p) {
11509 + pr_crit("Unable to configure portals\n");
11510 + return 0;
11511 + }
11512 + }
11513 + list_for_each_entry(pcfg, &shared_pcfgs, list) {
11514 + pcfg->public_cfg.is_shared = 1;
11515 + p = init_pcfg(pcfg);
11516 + if (p)
11517 + shared_portals[num_shared_portals++] = p;
11518 + }
11519 + if (!cpumask_empty(&slave_cpus))
11520 + for_each_cpu(cpu, &slave_cpus)
11521 + init_slave(cpu);
11522 + pr_info("Qman portals initialised\n");
11523 + cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
11524 + for_each_cpu(cpu, &offline_cpus)
11525 + qman_offline_cpu(cpu);
11526 +#ifdef CONFIG_HOTPLUG_CPU
11527 + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
11528 + "soc/qman_portal:online",
11529 + qman_online_cpu, qman_offline_cpu);
11530 + if (ret < 0) {
11531 + pr_err("qman: failed to register hotplug callbacks.\n");
11532 + return ret;
11533 + }
11534 +#endif
11535 + return 0;
11536 +}
11537 +
11538 +__init int qman_resource_init(void)
11539 +{
11540 + struct device_node *dn;
11541 + int ret;
11542 +
11543 + /* Initialise FQID allocation ranges */
11544 + for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
11545 + ret = fsl_fqid_range_init(dn);
11546 + if (ret)
11547 + return ret;
11548 + }
11549 + /* Initialise CGRID allocation ranges */
11550 + for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
11551 + ret = fsl_cgrid_range_init(dn);
11552 + if (ret)
11553 + return ret;
11554 + }
11555 + /* Parse pool channels into the allocator. (Must happen after portals
11556 + * are initialised.) */
11557 + for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
11558 + ret = fsl_pool_channel_range_init(dn);
11559 + if (ret)
11560 + return ret;
11561 + }
11562 +
11563 + /* Parse CEETM */
11564 + for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") {
11565 + ret = fsl_ceetm_init(dn);
11566 + if (ret)
11567 + return ret;
11568 + }
11569 + return 0;
11570 +}
11571 +
11572 +#ifdef CONFIG_SUSPEND
11573 +void suspend_unused_qportal(void)
11574 +{
11575 + struct qm_portal_config *pcfg;
11576 +
11577 + if (list_empty(&unused_pcfgs))
11578 + return;
11579 +
11580 + list_for_each_entry(pcfg, &unused_pcfgs, list) {
11581 +#ifdef CONFIG_PM_DEBUG
11582 + pr_info("Need to save qportal %d\n", pcfg->public_cfg.index);
11583 +#endif
11584 + /* save isdr, disable all via isdr, clear isr */
11585 + pcfg->saved_isdr =
11586 + __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
11587 + __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
11588 + 0xe08);
11589 + __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
11590 + 0xe00);
11591 + }
11592 + return;
11593 +}
11594 +
11595 +void resume_unused_qportal(void)
11596 +{
11597 + struct qm_portal_config *pcfg;
11598 +
11599 + if (list_empty(&unused_pcfgs))
11600 + return;
11601 +
11602 + list_for_each_entry(pcfg, &unused_pcfgs, list) {
11603 +#ifdef CONFIG_PM_DEBUG
11604 + pr_info("Need to resume qportal %d\n", pcfg->public_cfg.index);
11605 +#endif
11606 + /* restore isdr */
11607 + __raw_writel(pcfg->saved_isdr,
11608 + pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
11609 + }
11610 + return;
11611 +}
11612 +#endif
11613 --- /dev/null
11614 +++ b/drivers/staging/fsl_qbman/qman_high.c
11615 @@ -0,0 +1,5669 @@
11616 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
11617 + *
11618 + * Redistribution and use in source and binary forms, with or without
11619 + * modification, are permitted provided that the following conditions are met:
11620 + * * Redistributions of source code must retain the above copyright
11621 + * notice, this list of conditions and the following disclaimer.
11622 + * * Redistributions in binary form must reproduce the above copyright
11623 + * notice, this list of conditions and the following disclaimer in the
11624 + * documentation and/or other materials provided with the distribution.
11625 + * * Neither the name of Freescale Semiconductor nor the
11626 + * names of its contributors may be used to endorse or promote products
11627 + * derived from this software without specific prior written permission.
11628 + *
11629 + *
11630 + * ALTERNATIVELY, this software may be distributed under the terms of the
11631 + * GNU General Public License ("GPL") as published by the Free Software
11632 + * Foundation, either version 2 of that License or (at your option) any
11633 + * later version.
11634 + *
11635 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11636 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11637 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11638 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11639 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11640 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11641 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11642 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11643 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11644 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11645 + */
11646 +
11647 +#include "qman_low.h"
11648 +
11649 +/* Compilation constants */
11650 +#define DQRR_MAXFILL 15
11651 +#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
11652 +#define IRQNAME "QMan portal %d"
11653 +#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
11654 +
11655 +/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's
11656 + * positive, and rounding to the closest value if it's zero. NB, this macro
11657 + * implicitly upgrades parameters to unsigned 64-bit, so feed it with types
11658 + * that are compatible with this. NB, these arguments should not be expressions
11659 + * unless it is safe for them to be evaluated multiple times. Eg. do not pass
11660 + * in "some_value++" as a parameter to the macro! */
11661 +#define ROUNDING(n, d, r) \
11662 + (((r) < 0) ? div64_u64((n), (d)) : \
11663 + (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \
11664 + div64_u64(((n) + ((d) / 2)), (d))))
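As a quick illustration of the three rounding directions (these example calls are not part of the driver; the results follow from the unsigned 64-bit arithmetic noted above):

	u64 down    = ROUNDING(10, 4, -1);	/* 2: truncating division         */
	u64 up      = ROUNDING(10, 4,  1);	/* 3: (10 + 4 - 1) / 4            */
	u64 nearest = ROUNDING(10, 4,  0);	/* 3: (10 + 2) / 4, 2.5 rounds up */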
11665 +
11666 +/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
11667 + * inter-processor locking only. Note, FQLOCK() is always called either under a
11668 + * local_irq_save() or from interrupt context - hence there's no need for irq
11669 + * protection (and indeed, attempting to nest irq-protection doesn't work, as
11670 + * the "irq en/disable" machinery isn't recursive...). */
11671 +#define FQLOCK(fq) \
11672 + do { \
11673 + struct qman_fq *__fq478 = (fq); \
11674 + if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
11675 + spin_lock(&__fq478->fqlock); \
11676 + } while (0)
11677 +#define FQUNLOCK(fq) \
11678 + do { \
11679 + struct qman_fq *__fq478 = (fq); \
11680 + if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
11681 + spin_unlock(&__fq478->fqlock); \
11682 + } while (0)
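A minimal sketch of the calling convention described above (hypothetical caller; the irq protection comes from the caller or from interrupt context, never from the macro itself):

	unsigned long irqflags;

	local_irq_save(irqflags);
	FQLOCK(fq);			/* no-op unless QMAN_FQ_FLAG_LOCKED is set */
	/* ... update fq->state / fq->flags ... */
	FQUNLOCK(fq);
	local_irq_restore(irqflags);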
11683 +
11684 +static inline void fq_set(struct qman_fq *fq, u32 mask)
11685 +{
11686 + set_bits(mask, &fq->flags);
11687 +}
11688 +static inline void fq_clear(struct qman_fq *fq, u32 mask)
11689 +{
11690 + clear_bits(mask, &fq->flags);
11691 +}
11692 +static inline int fq_isset(struct qman_fq *fq, u32 mask)
11693 +{
11694 + return fq->flags & mask;
11695 +}
11696 +static inline int fq_isclear(struct qman_fq *fq, u32 mask)
11697 +{
11698 + return !(fq->flags & mask);
11699 +}
11700 +
11701 +struct qman_portal {
11702 + struct qm_portal p;
11703 + unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */
11704 + unsigned long irq_sources;
11705 + u32 use_eqcr_ci_stashing;
11706 + u32 slowpoll; /* only used when interrupts are off */
11707 + struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */
11708 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
11709 + struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */
11710 +#endif
11711 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
11712 + raw_spinlock_t sharing_lock; /* only used if is_shared */
11713 + int is_shared;
11714 + struct qman_portal *sharing_redirect;
11715 +#endif
11716 + u32 sdqcr;
11717 + int dqrr_disable_ref;
11718 + /* A portal-specific handler for DCP ERNs. If this is NULL, the global
11719 + * handler is called instead. */
11720 + qman_cb_dc_ern cb_dc_ern;
11721 + /* When the cpu-affine portal is activated, this is non-NULL */
11722 + const struct qm_portal_config *config;
11723 + /* This is needed for providing a non-NULL device to dma_map_***() */
11724 + struct platform_device *pdev;
11725 + struct dpa_rbtree retire_table;
11726 + char irqname[MAX_IRQNAME];
11727 + /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
11728 + struct qman_cgrs *cgrs;
11729 + /* linked-list of CSCN handlers. */
11730 + struct list_head cgr_cbs;
11731 + /* list lock */
11732 + spinlock_t cgr_lock;
11733 + /* 2-element array. ccgrs[0] is mask, ccgrs[1] is snapshot. */
11734 + struct qman_ccgrs *ccgrs[QMAN_CEETM_MAX];
11735 + /* 256-element array, each is a linked-list of CCSCN handlers. */
11736 + struct list_head ccgr_cbs[QMAN_CEETM_MAX];
11737 + /* list lock */
11738 + spinlock_t ccgr_lock;
11739 + /* track if memory was allocated by the driver */
11740 + u8 alloced;
11741 + /* power management data */
11742 + u32 save_isdr;
11743 +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
11744 +	/* Keep a shadow copy of the DQRR on LE systems as the SW needs to
11745 +	 * do byte swaps of DQRR read-only memory. The first entry must be
11746 +	 * aligned to 2 ** 10 so that DQRR index calculations based on the
11747 +	 * shadow copy address work (6 address bits + 4 bits for DQRR size).
11748 + */
11749 + struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE] __aligned(1024);
11750 +#endif
11751 +};
11752 +
11753 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
11754 +#define PORTAL_IRQ_LOCK(p, irqflags) \
11755 + do { \
11756 + if ((p)->is_shared) \
11757 + raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
11758 + else \
11759 + local_irq_save(irqflags); \
11760 + } while (0)
11761 +#define PORTAL_IRQ_UNLOCK(p, irqflags) \
11762 + do { \
11763 + if ((p)->is_shared) \
11764 + raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
11765 + irqflags); \
11766 + else \
11767 + local_irq_restore(irqflags); \
11768 + } while (0)
11769 +#else
11770 +#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
11771 +#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
11772 +#endif
11773 +
11774 +/* Global handler for DCP ERNs. Used when the portal receiving the message does
11775 + * not have a portal-specific handler. */
11776 +static qman_cb_dc_ern cb_dc_ern;
11777 +
11778 +static cpumask_t affine_mask;
11779 +static DEFINE_SPINLOCK(affine_mask_lock);
11780 +static u16 affine_channels[NR_CPUS];
11781 +static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
11782 +void *affine_portals[NR_CPUS];
11783 +
11784 +/* "raw" gets the cpu-local struct whether it's a redirect or not. */
11785 +static inline struct qman_portal *get_raw_affine_portal(void)
11786 +{
11787 + return &get_cpu_var(qman_affine_portal);
11788 +}
11789 +/* For ops that can redirect, this obtains the portal to use */
11790 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
11791 +static inline struct qman_portal *get_affine_portal(void)
11792 +{
11793 + struct qman_portal *p = get_raw_affine_portal();
11794 + if (p->sharing_redirect)
11795 + return p->sharing_redirect;
11796 + return p;
11797 +}
11798 +#else
11799 +#define get_affine_portal() get_raw_affine_portal()
11800 +#endif
11801 +/* For every "get", there must be a "put" */
11802 +static inline void put_affine_portal(void)
11803 +{
11804 + put_cpu_var(qman_affine_portal);
11805 +}
11806 +/* Exception: poll functions assume the caller is cpu-affine and in no risk of
11807 + * re-entrance, which are the two reasons we usually use the get/put_cpu_var()
11808 + * semantic - ie. to disable pre-emption. Some use-cases expect the execution
11809 + * context to remain as non-atomic during poll-triggered callbacks as it was
11810 + * when the poll API was first called (eg. NAPI), so we go out of our way in
11811 + * this case to not disable pre-emption. */
11812 +static inline struct qman_portal *get_poll_portal(void)
11813 +{
11814 + return &get_cpu_var(qman_affine_portal);
11815 +}
11816 +#define put_poll_portal()
11817 +
11818 +/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
11819 + * retirement notifications (the fact they are sometimes h/w-consumed means that
11820 + * contextB isn't always a s/w demux - and as we can't know which case it is
11821 + * when looking at the notification, we have to use the slow lookup for all of
11822 + * them). NB, it's possible to have multiple FQ objects refer to the same FQID
11823 + * (though at most one of them should be the consumer), so this table isn't for
11824 + * all FQs - FQs are added when retirement commands are issued, and removed when
11825 + * they complete, which also massively reduces the size of this table. */
11826 +IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
11827 +
11828 +/* This is what everything can wait on, even if it migrates to a different cpu
11829 + * to the one whose affine portal it is waiting on. */
11830 +static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
11831 +
11832 +static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
11833 +{
11834 + int ret = fqtree_push(&p->retire_table, fq);
11835 + if (ret)
11836 + pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
11837 + return ret;
11838 +}
11839 +
11840 +static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
11841 +{
11842 + fqtree_del(&p->retire_table, fq);
11843 +}
11844 +
11845 +static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
11846 +{
11847 + return fqtree_find(&p->retire_table, fqid);
11848 +}
11849 +
11850 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
11851 +static void **qman_fq_lookup_table;
11852 +static size_t qman_fq_lookup_table_size;
11853 +
11854 +int qman_setup_fq_lookup_table(size_t num_entries)
11855 +{
11856 + num_entries++;
11857 + /* Allocate 1 more entry since the first entry is not used */
11858 + qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *)));
11859 + if (!qman_fq_lookup_table) {
11860 + pr_err("QMan: Could not allocate fq lookup table\n");
11861 + return -ENOMEM;
11862 + }
11863 + qman_fq_lookup_table_size = num_entries;
11864 + pr_info("QMan: Allocated lookup table at %p, entry count %lu\n",
11865 + qman_fq_lookup_table,
11866 + (unsigned long)qman_fq_lookup_table_size);
11867 + return 0;
11868 +}
11869 +
11870 +/* global structure that maintains fq object mapping */
11871 +static DEFINE_SPINLOCK(fq_hash_table_lock);
11872 +
11873 +static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
11874 +{
11875 + u32 i;
11876 +
11877 + spin_lock(&fq_hash_table_lock);
11878 + /* Can't use index zero because this has special meaning
11879 + * in context_b field. */
11880 + for (i = 1; i < qman_fq_lookup_table_size; i++) {
11881 + if (qman_fq_lookup_table[i] == NULL) {
11882 + *entry = i;
11883 + qman_fq_lookup_table[i] = fq;
11884 + spin_unlock(&fq_hash_table_lock);
11885 + return 0;
11886 + }
11887 + }
11888 + spin_unlock(&fq_hash_table_lock);
11889 + return -ENOMEM;
11890 +}
11891 +
11892 +static void clear_fq_table_entry(u32 entry)
11893 +{
11894 + spin_lock(&fq_hash_table_lock);
11895 + BUG_ON(entry >= qman_fq_lookup_table_size);
11896 + qman_fq_lookup_table[entry] = NULL;
11897 + spin_unlock(&fq_hash_table_lock);
11898 +}
11899 +
11900 +static inline struct qman_fq *get_fq_table_entry(u32 entry)
11901 +{
11902 + BUG_ON(entry >= qman_fq_lookup_table_size);
11903 + return qman_fq_lookup_table[entry];
11904 +}
11905 +#endif
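A hypothetical lifecycle sketch of this lookup table (assuming CONFIG_FSL_QMAN_FQ_LOOKUP is set; the returned index, never zero, is what later code programs into contextB so that notifications can be demuxed back to the fq object):

	u32 idx;

	if (!find_empty_fq_table_entry(&idx, fq)) {	/* stores fq, idx >= 1    */
		/* idx is what later code programs into contextB            */
		BUG_ON(get_fq_table_entry(idx) != fq);	/* demux recovers fq      */
		clear_fq_table_entry(idx);		/* release on FQ teardown */
	}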
11906 +
11907 +static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
11908 +{
11909 + /* Byteswap the FQD to HW format */
11910 + fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
11911 + fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
11912 + fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
11913 + fqd->context_b = cpu_to_be32(fqd->context_b);
11914 + fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
11915 +}
11916 +
11917 +static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
11918 +{
11919 + /* Byteswap the FQD to CPU format */
11920 + fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
11921 + fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
11922 + fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
11923 + fqd->context_b = be32_to_cpu(fqd->context_b);
11924 + fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
11925 +}
11926 +
11927 +/* Swap a 40 bit address */
11928 +static inline u64 cpu_to_be40(u64 in)
11929 +{
11930 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
11931 + return in;
11932 +#else
11933 + u64 out = 0;
11934 + u8 *p = (u8 *) &out;
11935 + p[0] = in >> 32;
11936 + p[1] = in >> 24;
11937 + p[2] = in >> 16;
11938 + p[3] = in >> 8;
11939 + p[4] = in >> 0;
11940 + return out;
11941 +#endif
11942 +}
11943 +static inline u64 be40_to_cpu(u64 in)
11944 +{
11945 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
11946 + return in;
11947 +#else
11948 + u64 out = 0;
11949 + u8 *pout = (u8 *) &out;
11950 + u8 *pin = (u8 *) &in;
11951 + pout[0] = pin[4];
11952 + pout[1] = pin[3];
11953 + pout[2] = pin[2];
11954 + pout[3] = pin[1];
11955 + pout[4] = pin[0];
11956 + return out;
11957 +#endif
11958 +}
11959 +
11960 +/* Swap a 24 bit value */
11961 +static inline u32 cpu_to_be24(u32 in)
11962 +{
11963 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
11964 + return in;
11965 +#else
11966 + u32 out = 0;
11967 + u8 *p = (u8 *) &out;
11968 + p[0] = in >> 16;
11969 + p[1] = in >> 8;
11970 + p[2] = in >> 0;
11971 + return out;
11972 +#endif
11973 +}
11974 +
11975 +static inline u32 be24_to_cpu(u32 in)
11976 +{
11977 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
11978 + return in;
11979 +#else
11980 + u32 out = 0;
11981 + u8 *pout = (u8 *) &out;
11982 + u8 *pin = (u8 *) &in;
11983 + pout[0] = pin[2];
11984 + pout[1] = pin[1];
11985 + pout[2] = pin[0];
11986 + return out;
11987 +#endif
11988 +}
11989 +
11990 +static inline u64 be48_to_cpu(u64 in)
11991 +{
11992 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
11993 + return in;
11994 +#else
11995 + u64 out = 0;
11996 + u8 *pout = (u8 *) &out;
11997 + u8 *pin = (u8 *) &in;
11998 +
11999 + pout[0] = pin[5];
12000 + pout[1] = pin[4];
12001 + pout[2] = pin[3];
12002 + pout[3] = pin[2];
12003 + pout[4] = pin[1];
12004 + pout[5] = pin[0];
12005 + return out;
12006 +#endif
12007 +}
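For example (illustration only), on a little-endian CPU the 40-bit swap places the five significant bytes in big-endian order and leaves the remaining three bytes zero:

	u64 hw = cpu_to_be40(0x1122334455ULL);
	/* memory layout of hw: 11 22 33 44 55 00 00 00     */
	/* and be40_to_cpu(hw) == 0x1122334455ULL once more */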
12008 +static inline void cpu_to_hw_fd(struct qm_fd *fd)
12009 +{
12010 + fd->opaque_addr = cpu_to_be64(fd->opaque_addr);
12011 + fd->status = cpu_to_be32(fd->status);
12012 + fd->opaque = cpu_to_be32(fd->opaque);
12013 +}
12014 +
12015 +static inline void hw_fd_to_cpu(struct qm_fd *fd)
12016 +{
12017 + fd->opaque_addr = be64_to_cpu(fd->opaque_addr);
12018 + fd->status = be32_to_cpu(fd->status);
12019 + fd->opaque = be32_to_cpu(fd->opaque);
12020 +}
12021 +
12022 +static inline void hw_cq_query_to_cpu(struct qm_mcr_ceetm_cq_query *cq_query)
12023 +{
12024 + cq_query->ccgid = be16_to_cpu(cq_query->ccgid);
12025 + cq_query->state = be16_to_cpu(cq_query->state);
12026 + cq_query->pfdr_hptr = be24_to_cpu(cq_query->pfdr_hptr);
12027 + cq_query->pfdr_tptr = be24_to_cpu(cq_query->pfdr_tptr);
12028 + cq_query->od1_xsfdr = be16_to_cpu(cq_query->od1_xsfdr);
12029 + cq_query->od2_xsfdr = be16_to_cpu(cq_query->od2_xsfdr);
12030 + cq_query->od3_xsfdr = be16_to_cpu(cq_query->od3_xsfdr);
12031 + cq_query->od4_xsfdr = be16_to_cpu(cq_query->od4_xsfdr);
12032 + cq_query->od5_xsfdr = be16_to_cpu(cq_query->od5_xsfdr);
12033 + cq_query->od6_xsfdr = be16_to_cpu(cq_query->od6_xsfdr);
12034 + cq_query->ra1_xsfdr = be16_to_cpu(cq_query->ra1_xsfdr);
12035 + cq_query->ra2_xsfdr = be16_to_cpu(cq_query->ra2_xsfdr);
12036 + cq_query->frm_cnt = be24_to_cpu(cq_query->frm_cnt);
12037 +}
12038 +
12039 +static inline void hw_ccgr_query_to_cpu(struct qm_mcr_ceetm_ccgr_query *ccgr_q)
12040 +{
12041 + int i;
12042 +
12043 + ccgr_q->cm_query.cs_thres.hword =
12044 + be16_to_cpu(ccgr_q->cm_query.cs_thres.hword);
12045 + ccgr_q->cm_query.cs_thres_x.hword =
12046 + be16_to_cpu(ccgr_q->cm_query.cs_thres_x.hword);
12047 + ccgr_q->cm_query.td_thres.hword =
12048 + be16_to_cpu(ccgr_q->cm_query.td_thres.hword);
12049 + ccgr_q->cm_query.wr_parm_g.word =
12050 + be32_to_cpu(ccgr_q->cm_query.wr_parm_g.word);
12051 + ccgr_q->cm_query.wr_parm_y.word =
12052 + be32_to_cpu(ccgr_q->cm_query.wr_parm_y.word);
12053 + ccgr_q->cm_query.wr_parm_r.word =
12054 + be32_to_cpu(ccgr_q->cm_query.wr_parm_r.word);
12055 + ccgr_q->cm_query.cscn_targ_dcp =
12056 + be16_to_cpu(ccgr_q->cm_query.cscn_targ_dcp);
12057 + ccgr_q->cm_query.i_cnt = be40_to_cpu(ccgr_q->cm_query.i_cnt);
12058 + ccgr_q->cm_query.a_cnt = be40_to_cpu(ccgr_q->cm_query.a_cnt);
12059 + for (i = 0; i < ARRAY_SIZE(ccgr_q->cm_query.cscn_targ_swp); i++)
12060 + ccgr_q->cm_query.cscn_targ_swp[i] =
12061 + be32_to_cpu(ccgr_q->cm_query.cscn_targ_swp[i]);
12062 +}
12063 +
12064 +/* In the case that slow- and fast-path handling are both done by qman_poll()
12065 + * (ie. because there is no interrupt handling), we ought to balance how often
12066 + * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
12067 + * sources, so we call the fast poll 'n' times before calling the slow poll
12068 + * once. The idle decrementer constant is used when the last slow-poll detected
12069 + * no work to do, and the busy decrementer constant when the last slow-poll had
12070 + * work to do. */
12071 +#define SLOW_POLL_IDLE 1000
12072 +#define SLOW_POLL_BUSY 10
12073 +static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
12074 +static inline unsigned int __poll_portal_fast(struct qman_portal *p,
12075 + unsigned int poll_limit);
12076 +
12077 +/* Portal interrupt handler */
12078 +static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
12079 +{
12080 + struct qman_portal *p = ptr;
12081 + /*
12082 + * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
12083 + * it could race against a Query Congestion State command also given
12084 + * as part of the handling of this interrupt source. We mustn't
12085 + * clear it a second time in this top-level function.
12086 + */
12087 + u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
12088 + ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
12089 + u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
12090 + /* DQRR-handling if it's interrupt-driven */
12091 + if (is & QM_PIRQ_DQRI)
12092 + __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
12093 + /* Handling of anything else that's interrupt-driven */
12094 + clear |= __poll_portal_slow(p, is);
12095 + qm_isr_status_clear(&p->p, clear);
12096 + return IRQ_HANDLED;
12097 +}
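For reference, a consumer that wants this handler (rather than qman_poll()) to drive the DQRR fast path only needs to enable the DQRI source, e.g. (sketch using the API exported further down in this file):

	qman_irqsource_add(QM_PIRQ_DQRI);	/* DQRR now serviced from portal_isr() */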
12098 +
12099 +/* This inner version is used privately by qman_create_affine_portal(), as well
12100 + * as by the exported qman_stop_dequeues(). */
12101 +static inline void qman_stop_dequeues_ex(struct qman_portal *p)
12102 +{
12103 + unsigned long irqflags __maybe_unused;
12104 + PORTAL_IRQ_LOCK(p, irqflags);
12105 + if (!(p->dqrr_disable_ref++))
12106 + qm_dqrr_set_maxfill(&p->p, 0);
12107 + PORTAL_IRQ_UNLOCK(p, irqflags);
12108 +}
12109 +
12110 +static int drain_mr_fqrni(struct qm_portal *p)
12111 +{
12112 + const struct qm_mr_entry *msg;
12113 +loop:
12114 + msg = qm_mr_current(p);
12115 + if (!msg) {
12116 + /* if MR was full and h/w had other FQRNI entries to produce, we
12117 + * need to allow it time to produce those entries once the
12118 + * existing entries are consumed. A worst-case situation
12119 + * (fully-loaded system) means h/w sequencers may have to do 3-4
12120 + * other things before servicing the portal's MR pump, each of
12121 + * which (if slow) may take ~50 qman cycles (which is ~200
12122 + * processor cycles). So rounding up and then multiplying this
12123 + * worst-case estimate by a factor of 10, just to be
12124 + * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
12125 + * one entry at a time, so h/w has an opportunity to produce new
12126 + * entries well before the ring has been fully consumed, so
12127 + * we're being *really* paranoid here. */
12128 + u64 now, then = mfatb();
12129 + do {
12130 + now = mfatb();
12131 + } while ((then + 10000) > now);
12132 + msg = qm_mr_current(p);
12133 + if (!msg)
12134 + return 0;
12135 + }
12136 + if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
12137 + /* We aren't draining anything but FQRNIs */
12138 + pr_err("QMan found verb 0x%x in MR\n", msg->verb);
12139 + return -1;
12140 + }
12141 + qm_mr_next(p);
12142 + qm_mr_cci_consume(p, 1);
12143 + goto loop;
12144 +}
12145 +
12146 +#ifdef CONFIG_SUSPEND
12147 +static int _qman_portal_suspend_noirq(struct device *dev)
12148 +{
12149 + struct qman_portal *p = (struct qman_portal *)dev->platform_data;
12150 +#ifdef CONFIG_PM_DEBUG
12151 + struct platform_device *pdev = to_platform_device(dev);
12152 +#endif
12153 +
12154 + p->save_isdr = qm_isr_disable_read(&p->p);
12155 + qm_isr_disable_write(&p->p, 0xffffffff);
12156 + qm_isr_status_clear(&p->p, 0xffffffff);
12157 +#ifdef CONFIG_PM_DEBUG
12158 + pr_info("Suspend for %s\n", pdev->name);
12159 +#endif
12160 + return 0;
12161 +}
12162 +
12163 +static int _qman_portal_resume_noirq(struct device *dev)
12164 +{
12165 + struct qman_portal *p = (struct qman_portal *)dev->platform_data;
12166 +
12167 + /* restore isdr */
12168 + qm_isr_disable_write(&p->p, p->save_isdr);
12169 + return 0;
12170 +}
12171 +#else
12172 +#define _qman_portal_suspend_noirq NULL
12173 +#define _qman_portal_resume_noirq NULL
12174 +#endif
12175 +
12176 +struct dev_pm_domain qman_portal_device_pm_domain = {
12177 + .ops = {
12178 + USE_PLATFORM_PM_SLEEP_OPS
12179 + .suspend_noirq = _qman_portal_suspend_noirq,
12180 + .resume_noirq = _qman_portal_resume_noirq,
12181 + }
12182 +};
12183 +
12184 +struct qman_portal *qman_create_portal(
12185 + struct qman_portal *portal,
12186 + const struct qm_portal_config *config,
12187 + const struct qman_cgrs *cgrs)
12188 +{
12189 + struct qm_portal *__p;
12190 + char buf[16];
12191 + int ret;
12192 + u32 isdr;
12193 +
12194 + if (!portal) {
12195 + portal = kmalloc(sizeof(*portal), GFP_KERNEL);
12196 + if (!portal)
12197 + return portal;
12198 + portal->alloced = 1;
12199 + } else
12200 + portal->alloced = 0;
12201 +
12202 + __p = &portal->p;
12203 +
12204 +#if (defined CONFIG_PPC || defined CONFIG_PPC64) && defined CONFIG_FSL_PAMU
12205 + /* PAMU is required for stashing */
12206 + portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
12207 + 1 : 0);
12208 +#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
12209 + portal->use_eqcr_ci_stashing = 1;
12210 +#else
12211 + portal->use_eqcr_ci_stashing = 0;
12212 +#endif
12213 +
12214 + /* prep the low-level portal struct with the mapped addresses from the
12215 + * config, everything that follows depends on it and "config" is more
12216 + * for (de)reference... */
12217 + __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
12218 + __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
12219 + /*
12220 + * If CI-stashing is used, the current defaults use a threshold of 3,
12221 +	 * and stash with higher-than-DQRR priority.
12222 + */
12223 + if (qm_eqcr_init(__p, qm_eqcr_pvb,
12224 + portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
12225 + pr_err("Qman EQCR initialisation failed\n");
12226 + goto fail_eqcr;
12227 + }
12228 + if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb,
12229 + qm_dqrr_cdc, DQRR_MAXFILL)) {
12230 + pr_err("Qman DQRR initialisation failed\n");
12231 + goto fail_dqrr;
12232 + }
12233 + if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) {
12234 + pr_err("Qman MR initialisation failed\n");
12235 + goto fail_mr;
12236 + }
12237 + if (qm_mc_init(__p)) {
12238 + pr_err("Qman MC initialisation failed\n");
12239 + goto fail_mc;
12240 + }
12241 + if (qm_isr_init(__p)) {
12242 + pr_err("Qman ISR initialisation failed\n");
12243 + goto fail_isr;
12244 + }
12245 + /* static interrupt-gating controls */
12246 + qm_dqrr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH);
12247 + qm_mr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH);
12248 + qm_isr_set_iperiod(__p, CONFIG_FSL_QMAN_PIRQ_IPERIOD);
12249 + portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
12250 + if (!portal->cgrs)
12251 + goto fail_cgrs;
12252 + /* initial snapshot is no-depletion */
12253 + qman_cgrs_init(&portal->cgrs[1]);
12254 + if (cgrs)
12255 + portal->cgrs[0] = *cgrs;
12256 + else
12257 + /* if the given mask is NULL, assume all CGRs can be seen */
12258 + qman_cgrs_fill(&portal->cgrs[0]);
12259 + INIT_LIST_HEAD(&portal->cgr_cbs);
12260 + spin_lock_init(&portal->cgr_lock);
12261 + if (num_ceetms) {
12262 + for (ret = 0; ret < num_ceetms; ret++) {
12263 + portal->ccgrs[ret] = kmalloc(2 *
12264 + sizeof(struct qman_ccgrs), GFP_KERNEL);
12265 + if (!portal->ccgrs[ret])
12266 + goto fail_ccgrs;
12267 + qman_ccgrs_init(&portal->ccgrs[ret][1]);
12268 + qman_ccgrs_fill(&portal->ccgrs[ret][0]);
12269 + INIT_LIST_HEAD(&portal->ccgr_cbs[ret]);
12270 + }
12271 + }
12272 + spin_lock_init(&portal->ccgr_lock);
12273 + portal->bits = 0;
12274 + portal->slowpoll = 0;
12275 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
12276 + portal->eqci_owned = NULL;
12277 +#endif
12278 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
12279 + raw_spin_lock_init(&portal->sharing_lock);
12280 + portal->is_shared = config->public_cfg.is_shared;
12281 + portal->sharing_redirect = NULL;
12282 +#endif
12283 + portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
12284 + QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
12285 + QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
12286 + portal->dqrr_disable_ref = 0;
12287 + portal->cb_dc_ern = NULL;
12288 + sprintf(buf, "qportal-%d", config->public_cfg.channel);
12289 + portal->pdev = platform_device_alloc(buf, -1);
12290 + if (!portal->pdev) {
12291 + pr_err("qman_portal - platform_device_alloc() failed\n");
12292 + goto fail_devalloc;
12293 + }
12294 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
12295 + portal->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
12296 + portal->pdev->dev.dma_mask = &portal->pdev->dev.coherent_dma_mask;
12297 +#else
12298 + if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) {
12299 + pr_err("qman_portal - dma_set_mask() failed\n");
12300 + goto fail_devadd;
12301 + }
12302 +#endif
12303 + portal->pdev->dev.pm_domain = &qman_portal_device_pm_domain;
12304 + portal->pdev->dev.platform_data = portal;
12305 + ret = platform_device_add(portal->pdev);
12306 + if (ret) {
12307 + pr_err("qman_portal - platform_device_add() failed\n");
12308 + goto fail_devadd;
12309 + }
12310 + dpa_rbtree_init(&portal->retire_table);
12311 + isdr = 0xffffffff;
12312 + qm_isr_disable_write(__p, isdr);
12313 + portal->irq_sources = 0;
12314 + qm_isr_enable_write(__p, portal->irq_sources);
12315 + qm_isr_status_clear(__p, 0xffffffff);
12316 + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
12317 + if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
12318 + portal)) {
12319 + pr_err("request_irq() failed\n");
12320 + goto fail_irq;
12321 + }
12322 + if ((config->public_cfg.cpu != -1) &&
12323 + irq_can_set_affinity(config->public_cfg.irq) &&
12324 + irq_set_affinity(config->public_cfg.irq,
12325 + cpumask_of(config->public_cfg.cpu))) {
12326 + pr_err("irq_set_affinity() failed\n");
12327 + goto fail_affinity;
12328 + }
12329 +
12330 + /* Need EQCR to be empty before continuing */
12331 + isdr ^= QM_PIRQ_EQCI;
12332 + qm_isr_disable_write(__p, isdr);
12333 + ret = qm_eqcr_get_fill(__p);
12334 + if (ret) {
12335 + pr_err("Qman EQCR unclean\n");
12336 + goto fail_eqcr_empty;
12337 + }
12338 + isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
12339 + qm_isr_disable_write(__p, isdr);
12340 + if (qm_dqrr_current(__p) != NULL) {
12341 + pr_err("Qman DQRR unclean\n");
12342 + qm_dqrr_cdc_consume_n(__p, 0xffff);
12343 + }
12344 + if (qm_mr_current(__p) != NULL) {
12345 + /* special handling, drain just in case it's a few FQRNIs */
12346 + if (drain_mr_fqrni(__p)) {
12347 + const struct qm_mr_entry *e = qm_mr_current(__p);
12348 + /*
12349 +			 * The message ring cannot be empty here, so there is no
12350 +			 * need to check that qm_mr_current() returned successfully.
12351 + */
12352 +			pr_err("Qman MR unclean, MR VERB 0x%x, rc 0x%x, addr 0x%x\n",
12353 + e->verb, e->ern.rc, e->ern.fd.addr_lo);
12354 + goto fail_dqrr_mr_empty;
12355 + }
12356 + }
12357 + /* Success */
12358 + portal->config = config;
12359 + qm_isr_disable_write(__p, 0);
12360 + qm_isr_uninhibit(__p);
12361 + /* Write a sane SDQCR */
12362 + qm_dqrr_sdqcr_set(__p, portal->sdqcr);
12363 + return portal;
12364 +fail_dqrr_mr_empty:
12365 +fail_eqcr_empty:
12366 +fail_affinity:
12367 + free_irq(config->public_cfg.irq, portal);
12368 +fail_irq:
12369 + platform_device_del(portal->pdev);
12370 +fail_devadd:
12371 + platform_device_put(portal->pdev);
12372 +fail_devalloc:
12373 + if (num_ceetms)
12374 + for (ret = 0; ret < num_ceetms; ret++)
12375 + kfree(portal->ccgrs[ret]);
12376 +fail_ccgrs:
12377 + kfree(portal->cgrs);
12378 +fail_cgrs:
12379 + qm_isr_finish(__p);
12380 +fail_isr:
12381 + qm_mc_finish(__p);
12382 +fail_mc:
12383 + qm_mr_finish(__p);
12384 +fail_mr:
12385 + qm_dqrr_finish(__p);
12386 +fail_dqrr:
12387 + qm_eqcr_finish(__p);
12388 +fail_eqcr:
12389 + if (portal->alloced)
12390 + kfree(portal);
12391 + return NULL;
12392 +}
12393 +
12394 +struct qman_portal *qman_create_affine_portal(
12395 + const struct qm_portal_config *config,
12396 + const struct qman_cgrs *cgrs)
12397 +{
12398 + struct qman_portal *res;
12399 + struct qman_portal *portal;
12400 +
12401 + portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
12402 + res = qman_create_portal(portal, config, cgrs);
12403 + if (res) {
12404 + spin_lock(&affine_mask_lock);
12405 + cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
12406 + affine_channels[config->public_cfg.cpu] =
12407 + config->public_cfg.channel;
12408 + affine_portals[config->public_cfg.cpu] = portal;
12409 + spin_unlock(&affine_mask_lock);
12410 + }
12411 + return res;
12412 +}
12413 +
12414 +/* These checks are BUG_ON()s because the driver is already supposed to avoid
12415 + * these cases. */
12416 +struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
12417 + int cpu)
12418 +{
12419 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
12420 + struct qman_portal *p;
12421 + p = &per_cpu(qman_affine_portal, cpu);
12422 + /* Check that we don't already have our own portal */
12423 + BUG_ON(p->config);
12424 + /* Check that we aren't already slaving to another portal */
12425 + BUG_ON(p->is_shared);
12426 + /* Check that 'redirect' is prepared to have us */
12427 + BUG_ON(!redirect->config->public_cfg.is_shared);
12428 + /* These are the only elements to initialise when redirecting */
12429 + p->irq_sources = 0;
12430 + p->sharing_redirect = redirect;
12431 + affine_portals[cpu] = p;
12432 + return p;
12433 +#else
12434 + BUG();
12435 + return NULL;
12436 +#endif
12437 +}
12438 +
12439 +void qman_destroy_portal(struct qman_portal *qm)
12440 +{
12441 + const struct qm_portal_config *pcfg;
12442 + int i;
12443 +
12444 + /* Stop dequeues on the portal */
12445 + qm_dqrr_sdqcr_set(&qm->p, 0);
12446 +
12447 + /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
12448 + * something related to QM_PIRQ_EQCI, this may need fixing.
12449 + * Also, due to the prefetching model used for CI updates in the enqueue
12450 + * path, this update will only invalidate the CI cacheline *after*
12451 + * working on it, so we need to call this twice to ensure a full update
12452 + * irrespective of where the enqueue processing was at when the teardown
12453 + * began. */
12454 + qm_eqcr_cce_update(&qm->p);
12455 + qm_eqcr_cce_update(&qm->p);
12456 + pcfg = qm->config;
12457 +
12458 + free_irq(pcfg->public_cfg.irq, qm);
12459 +
12460 + kfree(qm->cgrs);
12461 + if (num_ceetms)
12462 + for (i = 0; i < num_ceetms; i++)
12463 + kfree(qm->ccgrs[i]);
12464 + qm_isr_finish(&qm->p);
12465 + qm_mc_finish(&qm->p);
12466 + qm_mr_finish(&qm->p);
12467 + qm_dqrr_finish(&qm->p);
12468 + qm_eqcr_finish(&qm->p);
12469 +
12470 + platform_device_del(qm->pdev);
12471 + platform_device_put(qm->pdev);
12472 +
12473 + qm->config = NULL;
12474 + if (qm->alloced)
12475 + kfree(qm);
12476 +}
12477 +
12478 +const struct qm_portal_config *qman_destroy_affine_portal(void)
12479 +{
12480 + /* We don't want to redirect if we're a slave, use "raw" */
12481 + struct qman_portal *qm = get_raw_affine_portal();
12482 + const struct qm_portal_config *pcfg;
12483 + int cpu;
12484 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
12485 + if (qm->sharing_redirect) {
12486 + qm->sharing_redirect = NULL;
12487 + put_affine_portal();
12488 + return NULL;
12489 + }
12490 + qm->is_shared = 0;
12491 +#endif
12492 + pcfg = qm->config;
12493 + cpu = pcfg->public_cfg.cpu;
12494 +
12495 + qman_destroy_portal(qm);
12496 +
12497 + spin_lock(&affine_mask_lock);
12498 + cpumask_clear_cpu(cpu, &affine_mask);
12499 + spin_unlock(&affine_mask_lock);
12500 + put_affine_portal();
12501 + return pcfg;
12502 +}
12503 +
12504 +const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
12505 +{
12506 + return &p->config->public_cfg;
12507 +}
12508 +EXPORT_SYMBOL(qman_p_get_portal_config);
12509 +
12510 +const struct qman_portal_config *qman_get_portal_config(void)
12511 +{
12512 + struct qman_portal *p = get_affine_portal();
12513 + const struct qman_portal_config *ret = qman_p_get_portal_config(p);
12514 + put_affine_portal();
12515 + return ret;
12516 +}
12517 +EXPORT_SYMBOL(qman_get_portal_config);
12518 +
12519 +/* Inline helper to reduce nesting in __poll_portal_slow() */
12520 +static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
12521 + const struct qm_mr_entry *msg, u8 verb)
12522 +{
12523 + FQLOCK(fq);
12524 + switch (verb) {
12525 + case QM_MR_VERB_FQRL:
12526 + DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
12527 + fq_clear(fq, QMAN_FQ_STATE_ORL);
12528 + table_del_fq(p, fq);
12529 + break;
12530 + case QM_MR_VERB_FQRN:
12531 + DPA_ASSERT((fq->state == qman_fq_state_parked) ||
12532 + (fq->state == qman_fq_state_sched));
12533 + DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
12534 + fq_clear(fq, QMAN_FQ_STATE_CHANGING);
12535 + if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
12536 + fq_set(fq, QMAN_FQ_STATE_NE);
12537 + if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
12538 + fq_set(fq, QMAN_FQ_STATE_ORL);
12539 + else
12540 + table_del_fq(p, fq);
12541 + fq->state = qman_fq_state_retired;
12542 + break;
12543 + case QM_MR_VERB_FQPN:
12544 + DPA_ASSERT(fq->state == qman_fq_state_sched);
12545 + DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
12546 + fq->state = qman_fq_state_parked;
12547 + }
12548 + FQUNLOCK(fq);
12549 +}
12550 +
12551 +static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
12552 +{
12553 + const struct qm_mr_entry *msg;
12554 + struct qm_mr_entry swapped_msg;
12555 + int k;
12556 +
12557 + if (is & QM_PIRQ_CSCI) {
12558 + struct qman_cgrs rr, c;
12559 + struct qm_mc_result *mcr;
12560 + struct qman_cgr *cgr;
12561 + unsigned long irqflags __maybe_unused;
12562 +
12563 + spin_lock_irqsave(&p->cgr_lock, irqflags);
12564 + /*
12565 + * The CSCI bit must be cleared _before_ issuing the
12566 + * Query Congestion State command, to ensure that a long
12567 + * CGR State Change callback cannot miss an intervening
12568 + * state change.
12569 + */
12570 + qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
12571 + qm_mc_start(&p->p);
12572 + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
12573 + while (!(mcr = qm_mc_result(&p->p)))
12574 + cpu_relax();
12575 + for (k = 0; k < 8; k++)
12576 + mcr->querycongestion.state.__state[k] = be32_to_cpu(
12577 + mcr->querycongestion.state.__state[k]);
12578 + /* mask out the ones I'm not interested in */
12579 + qman_cgrs_and(&rr, (const struct qman_cgrs *)
12580 + &mcr->querycongestion.state, &p->cgrs[0]);
12581 + /* check previous snapshot for delta, enter/exit congestion */
12582 + qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
12583 + /* update snapshot */
12584 + qman_cgrs_cp(&p->cgrs[1], &rr);
12585 + /* Invoke callback */
12586 + list_for_each_entry(cgr, &p->cgr_cbs, node)
12587 + if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
12588 + cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
12589 + spin_unlock_irqrestore(&p->cgr_lock, irqflags);
12590 + }
12591 + if (is & QM_PIRQ_CCSCI) {
12592 + struct qman_ccgrs rr, c, congestion_result;
12593 + struct qm_mc_result *mcr;
12594 + struct qm_mc_command *mcc;
12595 + struct qm_ceetm_ccg *ccg;
12596 + unsigned long irqflags __maybe_unused;
12597 + int i, j;
12598 +
12599 + spin_lock_irqsave(&p->ccgr_lock, irqflags);
12600 + /*
12601 + * The CCSCI bit must be cleared _before_ issuing the
12602 + * Query Congestion State command, to ensure that a long
12603 + * CCGR State Change callback cannot miss an intervening
12604 + * state change.
12605 + */
12606 + qm_isr_status_clear(&p->p, QM_PIRQ_CCSCI);
12607 +
12608 + for (i = 0; i < num_ceetms; i++) {
12609 + for (j = 0; j < 2; j++) {
12610 + mcc = qm_mc_start(&p->p);
12611 + mcc->ccgr_query.ccgrid = cpu_to_be16(
12612 + CEETM_QUERY_CONGESTION_STATE | j);
12613 + mcc->ccgr_query.dcpid = i;
12614 + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
12615 + while (!(mcr = qm_mc_result(&p->p)))
12616 + cpu_relax();
12617 + for (k = 0; k < 8; k++)
12618 + mcr->ccgr_query.congestion_state.state.
12619 + __state[k] = be32_to_cpu(
12620 + mcr->ccgr_query.
12621 + congestion_state.state.
12622 + __state[k]);
12623 + congestion_result.q[j] =
12624 + mcr->ccgr_query.congestion_state.state;
12625 + }
12626 + /* mask out the ones I'm not interested in */
12627 + qman_ccgrs_and(&rr, &congestion_result,
12628 + &p->ccgrs[i][0]);
12629 + /*
12630 + * check previous snapshot for delta, enter/exit
12631 + * congestion.
12632 + */
12633 + qman_ccgrs_xor(&c, &rr, &p->ccgrs[i][1]);
12634 + /* update snapshot */
12635 + qman_ccgrs_cp(&p->ccgrs[i][1], &rr);
12636 + /* Invoke callback */
12637 + list_for_each_entry(ccg, &p->ccgr_cbs[i], cb_node)
12638 + if (ccg->cb && qman_ccgrs_get(&c,
12639 + (ccg->parent->idx << 4) | ccg->idx))
12640 + ccg->cb(ccg, ccg->cb_ctx,
12641 + qman_ccgrs_get(&rr,
12642 + (ccg->parent->idx << 4)
12643 + | ccg->idx));
12644 + }
12645 + spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
12646 + }
12647 +
12648 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
12649 + if (is & QM_PIRQ_EQCI) {
12650 + unsigned long irqflags;
12651 + PORTAL_IRQ_LOCK(p, irqflags);
12652 + p->eqci_owned = NULL;
12653 + PORTAL_IRQ_UNLOCK(p, irqflags);
12654 + wake_up(&affine_queue);
12655 + }
12656 +#endif
12657 +
12658 + if (is & QM_PIRQ_EQRI) {
12659 + unsigned long irqflags __maybe_unused;
12660 + PORTAL_IRQ_LOCK(p, irqflags);
12661 + qm_eqcr_cce_update(&p->p);
12662 + qm_eqcr_set_ithresh(&p->p, 0);
12663 + PORTAL_IRQ_UNLOCK(p, irqflags);
12664 + wake_up(&affine_queue);
12665 + }
12666 +
12667 + if (is & QM_PIRQ_MRI) {
12668 + struct qman_fq *fq;
12669 + u8 verb, num = 0;
12670 +mr_loop:
12671 + qm_mr_pvb_update(&p->p);
12672 + msg = qm_mr_current(&p->p);
12673 + if (!msg)
12674 + goto mr_done;
12675 + swapped_msg = *msg;
12676 + hw_fd_to_cpu(&swapped_msg.ern.fd);
12677 + verb = msg->verb & QM_MR_VERB_TYPE_MASK;
12678 +		/* The message is a software ERN iff the 0x20 bit is clear */
12679 + if (verb & 0x20) {
12680 + switch (verb) {
12681 + case QM_MR_VERB_FQRNI:
12682 + /* nada, we drop FQRNIs on the floor */
12683 + break;
12684 + case QM_MR_VERB_FQRN:
12685 + case QM_MR_VERB_FQRL:
12686 + /* Lookup in the retirement table */
12687 + fq = table_find_fq(p, be32_to_cpu(msg->fq.fqid));
12688 + BUG_ON(!fq);
12689 + fq_state_change(p, fq, &swapped_msg, verb);
12690 + if (fq->cb.fqs)
12691 + fq->cb.fqs(p, fq, &swapped_msg);
12692 + break;
12693 + case QM_MR_VERB_FQPN:
12694 + /* Parked */
12695 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
12696 + fq = get_fq_table_entry(
12697 + be32_to_cpu(msg->fq.contextB));
12698 +#else
12699 + fq = (void *)(uintptr_t)
12700 + be32_to_cpu(msg->fq.contextB);
12701 +#endif
12702 + fq_state_change(p, fq, msg, verb);
12703 + if (fq->cb.fqs)
12704 + fq->cb.fqs(p, fq, &swapped_msg);
12705 + break;
12706 + case QM_MR_VERB_DC_ERN:
12707 + /* DCP ERN */
12708 + if (p->cb_dc_ern)
12709 + p->cb_dc_ern(p, msg);
12710 + else if (cb_dc_ern)
12711 + cb_dc_ern(p, msg);
12712 + else {
12713 + static int warn_once;
12714 + if (!warn_once) {
12715 + pr_crit("Leaking DCP ERNs!\n");
12716 + warn_once = 1;
12717 + }
12718 + }
12719 + break;
12720 + default:
12721 + pr_crit("Invalid MR verb 0x%02x\n", verb);
12722 + }
12723 + } else {
12724 +			/* It's a software ERN */
12725 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
12726 + fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
12727 +#else
12728 + fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
12729 +#endif
12730 + fq->cb.ern(p, fq, &swapped_msg);
12731 + }
12732 + num++;
12733 + qm_mr_next(&p->p);
12734 + goto mr_loop;
12735 +mr_done:
12736 + qm_mr_cci_consume(&p->p, num);
12737 + }
12738 + /*
12739 + * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
12740 + * processing. If that interrupt source has meanwhile been re-asserted,
12741 + * we mustn't clear it here (or in the top-level interrupt handler).
12742 + */
12743 + return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
12744 +}
12745 +
12746 +/* remove some slowish-path stuff from the "fast path" and make sure it isn't
12747 + * inlined. */
12748 +static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
12749 +{
12750 + p->vdqcr_owned = NULL;
12751 + FQLOCK(fq);
12752 + fq_clear(fq, QMAN_FQ_STATE_VDQCR);
12753 + FQUNLOCK(fq);
12754 + wake_up(&affine_queue);
12755 +}
12756 +
12757 +/* Copy a DQRR entry ensuring reads reach QBMan in order */
12758 +static inline void safe_copy_dqrr(struct qm_dqrr_entry *dst,
12759 + const struct qm_dqrr_entry *src)
12760 +{
12761 + int i = 0;
12762 +	const u64 *s64 = (const u64 *)src;
12763 +	u64 *d64 = (u64 *)dst;
12764 +
12765 +	/* DQRR only has 32 bytes of valid data, so we only need to
12766 +	 * copy four 64-bit values */
12767 + *d64 = *s64;
12768 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
12769 + {
12770 + u32 res, zero = 0;
12771 +		/* Creating a dependency after copying the first bytes ensures no
12772 +		   wrap transaction is generated to QBMan */
12773 + /* Logical AND the value pointed to by s64 with 0x0 and
12774 + store the result in res */
12775 + asm volatile("and %[result], %[in1], %[in2]"
12776 + : [result] "=r" (res)
12777 + : [in1] "r" (zero), [in2] "r" (*s64)
12778 + : "memory");
12779 +		/* Add res to s64 - this creates a dependency on the result of
12780 +		   reading the value of s64 before the next read. The side
12781 +		   effect of this is that the core must stall until the first
12782 +		   aligned read is complete, thereby preventing a WRAP
12783 +		   transaction from being seen by the QBMan */
12784 + asm volatile("add %[result], %[in1], %[in2]"
12785 + : [result] "=r" (s64)
12786 + : [in1] "r" (res), [in2] "r" (s64)
12787 + : "memory");
12788 + }
12789 +#endif
12790 + /* Copy the last 3 64 bit parts */
12791 + d64++; s64++;
12792 +	for (; i < 3; i++)
12793 + *d64++ = *s64++;
12794 +}
12795 +
12796 +/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
12797 + * that would conflict with other things if they ran at the same time on the
12798 + * same cpu are;
12799 + *
12800 + * (i) setting/clearing vdqcr_owned, and
12801 + * (ii) clearing the NE (Not Empty) flag.
12802 + *
12803 + * Both are safe. Because;
12804 + *
12805 + * (i) this clearing can only occur after qman_volatile_dequeue() has set the
12806 + * vdqcr_owned field (which it does before setting VDQCR), and
12807 + * qman_volatile_dequeue() blocks interrupts and preemption while this is
12808 + * done so that we can't interfere.
12809 + * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
12810 + * with (i) that API prevents us from interfering until it's safe.
12811 + *
12812 + * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
12813 + * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
12814 + * advantage comes from this function not having to "lock" anything at all.
12815 + *
12816 + * Note also that the callbacks are invoked at points which are safe against the
12817 + * above potential conflicts, but that this function itself is not re-entrant
12818 + * (this is because the function tracks one end of each FIFO in the portal and
12819 + * we do *not* want to lock that). So the consequence is that it is safe for
12820 + * user callbacks to call into any Qman API *except* qman_poll() (as that's the
12821 + * sole API that could be invoking the callback through this function).
12822 + */
12823 +static inline unsigned int __poll_portal_fast(struct qman_portal *p,
12824 + unsigned int poll_limit)
12825 +{
12826 + const struct qm_dqrr_entry *dq;
12827 + struct qman_fq *fq;
12828 + enum qman_cb_dqrr_result res;
12829 + unsigned int limit = 0;
12830 +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
12831 + struct qm_dqrr_entry *shadow;
12832 + const struct qm_dqrr_entry *orig_dq;
12833 +#endif
12834 +loop:
12835 + qm_dqrr_pvb_update(&p->p);
12836 + dq = qm_dqrr_current(&p->p);
12837 + if (!dq)
12838 + goto done;
12839 +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
12840 +	/* If running on an LE system the fields of the
12841 +	   dequeue entry must be swapped. Because the
12842 +	   QMan HW will ignore writes, the DQRR entry is
12843 +	   copied and the index stored within the copy */
12844 + shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
12845 + /* Use safe copy here to avoid WRAP transaction */
12846 + safe_copy_dqrr(shadow, dq);
12847 + orig_dq = dq;
12848 + dq = shadow;
12849 + shadow->fqid = be32_to_cpu(shadow->fqid);
12850 + shadow->contextB = be32_to_cpu(shadow->contextB);
12851 + shadow->seqnum = be16_to_cpu(shadow->seqnum);
12852 + hw_fd_to_cpu(&shadow->fd);
12853 +#endif
12854 + if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
12855 + /* VDQCR: don't trust contextB as the FQ may have been
12856 + * configured for h/w consumption and we're draining it
12857 + * post-retirement. */
12858 + fq = p->vdqcr_owned;
12859 + /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
12860 + * to check for clearing it when doing volatile dequeues. It's
12861 + * one less thing to check in the critical path (SDQCR). */
12862 + if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
12863 + fq_clear(fq, QMAN_FQ_STATE_NE);
12864 + /* this is duplicated from the SDQCR code, but we have stuff to
12865 + * do before *and* after this callback, and we don't want
12866 + * multiple if()s in the critical path (SDQCR). */
12867 + res = fq->cb.dqrr(p, fq, dq);
12868 + if (res == qman_cb_dqrr_stop)
12869 + goto done;
12870 + /* Check for VDQCR completion */
12871 + if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
12872 + clear_vdqcr(p, fq);
12873 + } else {
12874 + /* SDQCR: contextB points to the FQ */
12875 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
12876 + fq = get_fq_table_entry(dq->contextB);
12877 +#else
12878 + fq = (void *)(uintptr_t)dq->contextB;
12879 +#endif
12880 + /* Now let the callback do its stuff */
12881 + res = fq->cb.dqrr(p, fq, dq);
12882 +
12883 +		/* The callback can request that we exit without consuming
12884 +		 * this entry or advancing. */
12885 + if (res == qman_cb_dqrr_stop)
12886 + goto done;
12887 + }
12888 + /* Interpret 'dq' from a driver perspective. */
12889 + /* Parking isn't possible unless HELDACTIVE was set. NB,
12890 + * FORCEELIGIBLE implies HELDACTIVE, so we only need to
12891 + * check for HELDACTIVE to cover both. */
12892 + DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
12893 + (res != qman_cb_dqrr_park));
12894 +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
12895 + if (res != qman_cb_dqrr_defer)
12896 + qm_dqrr_cdc_consume_1ptr(&p->p, orig_dq,
12897 + (res == qman_cb_dqrr_park));
12898 +#else
12899 + /* Defer just means "skip it, I'll consume it myself later on" */
12900 + if (res != qman_cb_dqrr_defer)
12901 + qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park));
12902 +#endif
12903 + /* Move forward */
12904 + qm_dqrr_next(&p->p);
12905 + /* Entry processed and consumed, increment our counter. The callback can
12906 + * request that we exit after consuming the entry, and we also exit if
12907 + * we reach our processing limit, so loop back only if neither of these
12908 + * conditions is met. */
12909 + if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop))
12910 + goto loop;
12911 +done:
12912 + return limit;
12913 +}
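A minimal, hypothetical DQRR callback of the kind invoked above; its return value steers the consume/park/defer/stop handling in this function:

	static enum qman_cb_dqrr_result example_dqrr_cb(struct qman_portal *portal,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dq)
	{
		/* process dq->fd here ... */
		return qman_cb_dqrr_consume;	/* let the driver consume the entry */
	}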
12914 +
12915 +u32 qman_irqsource_get(void)
12916 +{
12917 + /* "irqsource" and "poll" APIs mustn't redirect when sharing, they
12918 + * should shut the user out if they are not the primary CPU hosting the
12919 + * portal. That's why we use the "raw" interface. */
12920 + struct qman_portal *p = get_raw_affine_portal();
12921 + u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
12922 + put_affine_portal();
12923 + return ret;
12924 +}
12925 +EXPORT_SYMBOL(qman_irqsource_get);
12926 +
12927 +int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
12928 +{
12929 + __maybe_unused unsigned long irqflags;
12930 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
12931 + if (p->sharing_redirect)
12932 + return -EINVAL;
12933 + else
12934 +#endif
12935 + {
12936 + bits = bits & QM_PIRQ_VISIBLE;
12937 + PORTAL_IRQ_LOCK(p, irqflags);
12938 +
12939 + /* Clear any previously remaining interrupt conditions in
12940 + * QCSP_ISR. This prevents raising a false interrupt when
12941 + * interrupt conditions are enabled in QCSP_IER.
12942 + */
12943 + qm_isr_status_clear(&p->p, bits);
12944 + set_bits(bits, &p->irq_sources);
12945 + qm_isr_enable_write(&p->p, p->irq_sources);
12946 + PORTAL_IRQ_UNLOCK(p, irqflags);
12947 + }
12948 + return 0;
12949 +}
12950 +EXPORT_SYMBOL(qman_p_irqsource_add);
12951 +
12952 +int qman_irqsource_add(u32 bits __maybe_unused)
12953 +{
12954 + struct qman_portal *p = get_raw_affine_portal();
12955 + int ret;
12956 + ret = qman_p_irqsource_add(p, bits);
12957 + put_affine_portal();
12958 + return ret;
12959 +}
12960 +EXPORT_SYMBOL(qman_irqsource_add);
12961 +
12962 +int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
12963 +{
12964 + __maybe_unused unsigned long irqflags;
12965 + u32 ier;
12966 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
12967 + if (p->sharing_redirect) {
12968 + put_affine_portal();
12969 + return -EINVAL;
12970 + }
12971 +#endif
12972 + /* Our interrupt handler only processes+clears status register bits that
12973 + * are in p->irq_sources. As we're trimming that mask, if one of them
12974 + * were to assert in the status register just before we remove it from
12975 + * the enable register, there would be an interrupt-storm when we
12976 + * release the IRQ lock. So we wait for the enable register update to
12977 + * take effect in h/w (by reading it back) and then clear all other bits
12978 + * in the status register. Ie. we clear them from ISR once it's certain
12979 + * IER won't allow them to reassert. */
12980 + PORTAL_IRQ_LOCK(p, irqflags);
12981 + bits &= QM_PIRQ_VISIBLE;
12982 + clear_bits(bits, &p->irq_sources);
12983 + qm_isr_enable_write(&p->p, p->irq_sources);
12984 +
12985 + ier = qm_isr_enable_read(&p->p);
12986 + /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
12987 + * data-dependency, ie. to protect against re-ordering. */
12988 + qm_isr_status_clear(&p->p, ~ier);
12989 + PORTAL_IRQ_UNLOCK(p, irqflags);
12990 + return 0;
12991 +}
12992 +EXPORT_SYMBOL(qman_p_irqsource_remove);
12993 +
12994 +int qman_irqsource_remove(u32 bits)
12995 +{
12996 + struct qman_portal *p = get_raw_affine_portal();
12997 + int ret;
12998 + ret = qman_p_irqsource_remove(p, bits);
12999 + put_affine_portal();
13000 + return ret;
13001 +}
13002 +EXPORT_SYMBOL(qman_irqsource_remove);
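+
+/*
+ * Illustrative sketch only, not part of the SDK driver: the intended
+ * interplay between the irqsource and poll APIs. QM_PIRQ_DQRI must be
+ * removed from the portal's IRQ sources before qman_poll_dqrr() is used
+ * (qman_p_poll_dqrr() asserts this below), and is re-added once polling is
+ * done. The budget of 32 is an arbitrary example value.
+ */
+static void __maybe_unused example_poll_mode(void)
+{
+	unsigned int budget = 32;
+
+	/* Switch the affine portal from IRQ-driven to polled dequeues */
+	qman_irqsource_remove(QM_PIRQ_DQRI);
+	while (qman_poll_dqrr(budget) == budget)
+		; /* ring still busy, keep polling */
+	/* Hand dequeue processing back to the interrupt handler */
+	qman_irqsource_add(QM_PIRQ_DQRI);
+}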
13003 +
13004 +const cpumask_t *qman_affine_cpus(void)
13005 +{
13006 + return &affine_mask;
13007 +}
13008 +EXPORT_SYMBOL(qman_affine_cpus);
13009 +
13010 +u16 qman_affine_channel(int cpu)
13011 +{
13012 + if (cpu < 0) {
13013 + struct qman_portal *portal = get_raw_affine_portal();
13014 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
13015 + BUG_ON(portal->sharing_redirect);
13016 +#endif
13017 + cpu = portal->config->public_cfg.cpu;
13018 + put_affine_portal();
13019 + }
13020 + BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
13021 + return affine_channels[cpu];
13022 +}
13023 +EXPORT_SYMBOL(qman_affine_channel);
13024 +
13025 +void *qman_get_affine_portal(int cpu)
13026 +{
13027 + return affine_portals[cpu];
13028 +}
13029 +EXPORT_SYMBOL(qman_get_affine_portal);
13030 +
13031 +int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
13032 +{
13033 + int ret;
13034 +
13035 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
13036 + if (unlikely(p->sharing_redirect))
13037 + ret = -EINVAL;
13038 + else
13039 +#endif
13040 + {
13041 + BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
13042 + ret = __poll_portal_fast(p, limit);
13043 + }
13044 + return ret;
13045 +}
13046 +EXPORT_SYMBOL(qman_p_poll_dqrr);
13047 +
13048 +int qman_poll_dqrr(unsigned int limit)
13049 +{
13050 + struct qman_portal *p = get_poll_portal();
13051 + int ret;
13052 + ret = qman_p_poll_dqrr(p, limit);
13053 + put_poll_portal();
13054 + return ret;
13055 +}
13056 +EXPORT_SYMBOL(qman_poll_dqrr);
13057 +
13058 +u32 qman_p_poll_slow(struct qman_portal *p)
13059 +{
13060 + u32 ret;
13061 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
13062 + if (unlikely(p->sharing_redirect))
13063 + ret = (u32)-1;
13064 + else
13065 +#endif
13066 + {
13067 + u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
13068 + ret = __poll_portal_slow(p, is);
13069 + qm_isr_status_clear(&p->p, ret);
13070 + }
13071 + return ret;
13072 +}
13073 +EXPORT_SYMBOL(qman_p_poll_slow);
13074 +
13075 +u32 qman_poll_slow(void)
13076 +{
13077 + struct qman_portal *p = get_poll_portal();
13078 + u32 ret;
13079 + ret = qman_p_poll_slow(p);
13080 + put_poll_portal();
13081 + return ret;
13082 +}
13083 +EXPORT_SYMBOL(qman_poll_slow);
13084 +
13085 +/* Legacy wrapper */
13086 +void qman_p_poll(struct qman_portal *p)
13087 +{
13088 +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
13089 + if (unlikely(p->sharing_redirect))
13090 + return;
13091 +#endif
13092 + if ((~p->irq_sources) & QM_PIRQ_SLOW) {
13093 + if (!(p->slowpoll--)) {
13094 + u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
13095 + u32 active = __poll_portal_slow(p, is);
13096 + if (active) {
13097 + qm_isr_status_clear(&p->p, active);
13098 + p->slowpoll = SLOW_POLL_BUSY;
13099 + } else
13100 + p->slowpoll = SLOW_POLL_IDLE;
13101 + }
13102 + }
13103 + if ((~p->irq_sources) & QM_PIRQ_DQRI)
13104 + __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
13105 +}
13106 +EXPORT_SYMBOL(qman_p_poll);
13107 +
13108 +void qman_poll(void)
13109 +{
13110 + struct qman_portal *p = get_poll_portal();
13111 + qman_p_poll(p);
13112 + put_poll_portal();
13113 +}
13114 +EXPORT_SYMBOL(qman_poll);
13115 +
13116 +void qman_p_stop_dequeues(struct qman_portal *p)
13117 +{
13118 + qman_stop_dequeues_ex(p);
13119 +}
13120 +EXPORT_SYMBOL(qman_p_stop_dequeues);
13121 +
13122 +void qman_stop_dequeues(void)
13123 +{
13124 + struct qman_portal *p = get_affine_portal();
13125 + qman_p_stop_dequeues(p);
13126 + put_affine_portal();
13127 +}
13128 +EXPORT_SYMBOL(qman_stop_dequeues);
13129 +
13130 +void qman_p_start_dequeues(struct qman_portal *p)
13131 +{
13132 + unsigned long irqflags __maybe_unused;
13133 + PORTAL_IRQ_LOCK(p, irqflags);
13134 + DPA_ASSERT(p->dqrr_disable_ref > 0);
13135 + if (!(--p->dqrr_disable_ref))
13136 + qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
13137 + PORTAL_IRQ_UNLOCK(p, irqflags);
13138 +}
13139 +EXPORT_SYMBOL(qman_p_start_dequeues);
13140 +
13141 +void qman_start_dequeues(void)
13142 +{
13143 + struct qman_portal *p = get_affine_portal();
13144 + qman_p_start_dequeues(p);
13145 + put_affine_portal();
13146 +}
13147 +EXPORT_SYMBOL(qman_start_dequeues);
13148 +
13149 +void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
13150 +{
13151 + unsigned long irqflags __maybe_unused;
13152 + PORTAL_IRQ_LOCK(p, irqflags);
13153 + pools &= p->config->public_cfg.pools;
13154 + p->sdqcr |= pools;
13155 + qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
13156 + PORTAL_IRQ_UNLOCK(p, irqflags);
13157 +}
13158 +EXPORT_SYMBOL(qman_p_static_dequeue_add);
13159 +
13160 +void qman_static_dequeue_add(u32 pools)
13161 +{
13162 + struct qman_portal *p = get_affine_portal();
13163 + qman_p_static_dequeue_add(p, pools);
13164 + put_affine_portal();
13165 +}
13166 +EXPORT_SYMBOL(qman_static_dequeue_add);
13167 +
13168 +void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
13169 +{
13170 + unsigned long irqflags __maybe_unused;
13171 + PORTAL_IRQ_LOCK(p, irqflags);
13172 + pools &= p->config->public_cfg.pools;
13173 + p->sdqcr &= ~pools;
13174 + qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
13175 + PORTAL_IRQ_UNLOCK(p, irqflags);
13176 +}
13177 +EXPORT_SYMBOL(qman_p_static_dequeue_del);
13178 +
13179 +void qman_static_dequeue_del(u32 pools)
13180 +{
13181 + struct qman_portal *p = get_affine_portal();
13182 + qman_p_static_dequeue_del(p, pools);
13183 + put_affine_portal();
13184 +}
13185 +EXPORT_SYMBOL(qman_static_dequeue_del);
13186 +
13187 +u32 qman_p_static_dequeue_get(struct qman_portal *p)
13188 +{
13189 + return p->sdqcr;
13190 +}
13191 +EXPORT_SYMBOL(qman_p_static_dequeue_get);
13192 +
13193 +u32 qman_static_dequeue_get(void)
13194 +{
13195 + struct qman_portal *p = get_affine_portal();
13196 + u32 ret = qman_p_static_dequeue_get(p);
13197 + put_affine_portal();
13198 + return ret;
13199 +}
13200 +EXPORT_SYMBOL(qman_static_dequeue_get);
13201 +
13202 +void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
13203 + int park_request)
13204 +{
13205 + qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
13206 +}
13207 +EXPORT_SYMBOL(qman_p_dca);
13208 +
13209 +void qman_dca(struct qm_dqrr_entry *dq, int park_request)
13210 +{
13211 + struct qman_portal *p = get_affine_portal();
13212 + qman_p_dca(p, dq, park_request);
13213 + put_affine_portal();
13214 +}
13215 +EXPORT_SYMBOL(qman_dca);
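+
+/*
+ * Illustrative sketch only, not part of the SDK driver: a callback may
+ * return qman_cb_dqrr_defer ("skip it, I'll consume it myself later on", as
+ * __poll_portal_fast() puts it), in which case the entry must later be
+ * acknowledged explicitly via qman_dca(). The single stashed pointer is an
+ * arbitrary example and is only valid until the portal reuses the ring slot.
+ */
+static struct qm_dqrr_entry *example_deferred_dq;
+
+static enum qman_cb_dqrr_result __maybe_unused example_defer_cb(
+					struct qman_portal *portal,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dq)
+{
+	example_deferred_dq = (struct qm_dqrr_entry *)dq;
+	return qman_cb_dqrr_defer;
+}
+
+static void __maybe_unused example_consume_deferred(void)
+{
+	/* Consume the deferred entry without parking the FQ */
+	qman_dca(example_deferred_dq, 0);
+}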
13216 +
13217 +/*******************/
13218 +/* Frame queue API */
13219 +/*******************/
13220 +
13221 +static const char *mcr_result_str(u8 result)
13222 +{
13223 + switch (result) {
13224 + case QM_MCR_RESULT_NULL:
13225 + return "QM_MCR_RESULT_NULL";
13226 + case QM_MCR_RESULT_OK:
13227 + return "QM_MCR_RESULT_OK";
13228 + case QM_MCR_RESULT_ERR_FQID:
13229 + return "QM_MCR_RESULT_ERR_FQID";
13230 + case QM_MCR_RESULT_ERR_FQSTATE:
13231 + return "QM_MCR_RESULT_ERR_FQSTATE";
13232 + case QM_MCR_RESULT_ERR_NOTEMPTY:
13233 + return "QM_MCR_RESULT_ERR_NOTEMPTY";
13234 + case QM_MCR_RESULT_PENDING:
13235 + return "QM_MCR_RESULT_PENDING";
13236 + case QM_MCR_RESULT_ERR_BADCOMMAND:
13237 + return "QM_MCR_RESULT_ERR_BADCOMMAND";
13238 + }
13239 + return "<unknown MCR result>";
13240 +}
13241 +
13242 +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
13243 +{
13244 + struct qm_fqd fqd;
13245 + struct qm_mcr_queryfq_np np;
13246 + struct qm_mc_command *mcc;
13247 + struct qm_mc_result *mcr;
13248 + struct qman_portal *p;
13249 + unsigned long irqflags __maybe_unused;
13250 +
13251 + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
13252 + int ret = qman_alloc_fqid(&fqid);
13253 + if (ret)
13254 + return ret;
13255 + }
13256 + spin_lock_init(&fq->fqlock);
13257 + fq->fqid = fqid;
13258 + fq->flags = flags;
13259 + fq->state = qman_fq_state_oos;
13260 + fq->cgr_groupid = 0;
13261 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
13262 + if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
13263 + return -ENOMEM;
13264 +#endif
13265 + if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
13266 + return 0;
13267 + /* Everything else is AS_IS support */
13268 + p = get_affine_portal();
13269 + PORTAL_IRQ_LOCK(p, irqflags);
13270 + mcc = qm_mc_start(&p->p);
13271 + mcc->queryfq.fqid = cpu_to_be32(fqid);
13272 + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
13273 + while (!(mcr = qm_mc_result(&p->p)))
13274 + cpu_relax();
13275 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
13276 + if (mcr->result != QM_MCR_RESULT_OK) {
13277 + pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
13278 + goto err;
13279 + }
13280 + fqd = mcr->queryfq.fqd;
13281 + hw_fqd_to_cpu(&fqd);
13282 + mcc = qm_mc_start(&p->p);
13283 + mcc->queryfq_np.fqid = cpu_to_be32(fqid);
13284 + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
13285 + while (!(mcr = qm_mc_result(&p->p)))
13286 + cpu_relax();
13287 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
13288 + if (mcr->result != QM_MCR_RESULT_OK) {
13289 + pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
13290 + goto err;
13291 + }
13292 + np = mcr->queryfq_np;
13293 + /* Phew, have queryfq and queryfq_np results, stitch together
13294 + * the FQ object from those. */
13295 + fq->cgr_groupid = fqd.cgid;
13296 + switch (np.state & QM_MCR_NP_STATE_MASK) {
13297 + case QM_MCR_NP_STATE_OOS:
13298 + break;
13299 + case QM_MCR_NP_STATE_RETIRED:
13300 + fq->state = qman_fq_state_retired;
13301 + if (np.frm_cnt)
13302 + fq_set(fq, QMAN_FQ_STATE_NE);
13303 + break;
13304 + case QM_MCR_NP_STATE_TEN_SCHED:
13305 + case QM_MCR_NP_STATE_TRU_SCHED:
13306 + case QM_MCR_NP_STATE_ACTIVE:
13307 + fq->state = qman_fq_state_sched;
13308 + if (np.state & QM_MCR_NP_STATE_R)
13309 + fq_set(fq, QMAN_FQ_STATE_CHANGING);
13310 + break;
13311 + case QM_MCR_NP_STATE_PARKED:
13312 + fq->state = qman_fq_state_parked;
13313 + break;
13314 + default:
13315 + DPA_ASSERT(NULL == "invalid FQ state");
13316 + }
13317 + if (fqd.fq_ctrl & QM_FQCTRL_CGE)
13318 + fq->state |= QMAN_FQ_STATE_CGR_EN;
13319 + PORTAL_IRQ_UNLOCK(p, irqflags);
13320 + put_affine_portal();
13321 + return 0;
13322 +err:
13323 + PORTAL_IRQ_UNLOCK(p, irqflags);
13324 + put_affine_portal();
13325 + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
13326 + qman_release_fqid(fqid);
13327 + return -EIO;
13328 +}
13329 +EXPORT_SYMBOL(qman_create_fq);
13330 +
13331 +void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
13332 +{
13333 +
13334 + /* We don't need to lock the FQ as it is a pre-condition that the FQ be
13335 + * quiesced. Instead, run some checks. */
13336 + switch (fq->state) {
13337 + case qman_fq_state_parked:
13338 + DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
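+		/* Fall through */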
13339 + case qman_fq_state_oos:
13340 + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
13341 + qman_release_fqid(fq->fqid);
13342 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
13343 + clear_fq_table_entry(fq->key);
13344 +#endif
13345 + return;
13346 + default:
13347 + break;
13348 + }
13349 +	DPA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
13350 +}
13351 +EXPORT_SYMBOL(qman_destroy_fq);
13352 +
13353 +u32 qman_fq_fqid(struct qman_fq *fq)
13354 +{
13355 + return fq->fqid;
13356 +}
13357 +EXPORT_SYMBOL(qman_fq_fqid);
13358 +
13359 +void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
13360 +{
13361 + if (state)
13362 + *state = fq->state;
13363 + if (flags)
13364 + *flags = fq->flags;
13365 +}
13366 +EXPORT_SYMBOL(qman_fq_state);
13367 +
13368 +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
13369 +{
13370 + struct qm_mc_command *mcc;
13371 + struct qm_mc_result *mcr;
13372 + struct qman_portal *p;
13373 + unsigned long irqflags __maybe_unused;
13374 + u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
13375 + QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
13376 +
13377 + if ((fq->state != qman_fq_state_oos) &&
13378 + (fq->state != qman_fq_state_parked))
13379 + return -EINVAL;
13380 +#ifdef CONFIG_FSL_DPA_CHECKING
13381 + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
13382 + return -EINVAL;
13383 +#endif
13384 + if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
13385 +		/* OAC can't be set at the same time as TDTHRESH */
13386 + if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
13387 + return -EINVAL;
13388 + }
13389 + /* Issue an INITFQ_[PARKED|SCHED] management command */
13390 + p = get_affine_portal();
13391 + PORTAL_IRQ_LOCK(p, irqflags);
13392 + FQLOCK(fq);
13393 + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
13394 + ((fq->state != qman_fq_state_oos) &&
13395 + (fq->state != qman_fq_state_parked)))) {
13396 + FQUNLOCK(fq);
13397 + PORTAL_IRQ_UNLOCK(p, irqflags);
13398 + put_affine_portal();
13399 + return -EBUSY;
13400 + }
13401 + mcc = qm_mc_start(&p->p);
13402 + if (opts)
13403 + mcc->initfq = *opts;
13404 + mcc->initfq.fqid = cpu_to_be32(fq->fqid);
13405 + mcc->initfq.count = 0;
13406 +
13407 + /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
13408 + * demux pointer. Otherwise, the caller-provided value is allowed to
13409 +	 * stand; don't overwrite it. */
13410 + if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
13411 + dma_addr_t phys_fq;
13412 + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
13413 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
13414 + mcc->initfq.fqd.context_b = fq->key;
13415 +#else
13416 + mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
13417 +#endif
13418 + /* and the physical address - NB, if the user wasn't trying to
13419 + * set CONTEXTA, clear the stashing settings. */
13420 + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
13421 + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
13422 + memset(&mcc->initfq.fqd.context_a, 0,
13423 + sizeof(mcc->initfq.fqd.context_a));
13424 + } else {
13425 + phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
13426 + DMA_TO_DEVICE);
13427 + qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
13428 + }
13429 + }
13430 + if (flags & QMAN_INITFQ_FLAG_LOCAL) {
13431 + mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel;
13432 + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
13433 + mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
13434 + mcc->initfq.fqd.dest.wq = 4;
13435 + }
13436 + }
13437 + mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
13438 + cpu_to_hw_fqd(&mcc->initfq.fqd);
13439 + qm_mc_commit(&p->p, myverb);
13440 + while (!(mcr = qm_mc_result(&p->p)))
13441 + cpu_relax();
13442 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
13443 + res = mcr->result;
13444 + if (res != QM_MCR_RESULT_OK) {
13445 + FQUNLOCK(fq);
13446 + PORTAL_IRQ_UNLOCK(p, irqflags);
13447 + put_affine_portal();
13448 + return -EIO;
13449 + }
13450 + if (opts) {
13451 + if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
13452 + if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
13453 + fq_set(fq, QMAN_FQ_STATE_CGR_EN);
13454 + else
13455 + fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
13456 + }
13457 + if (opts->we_mask & QM_INITFQ_WE_CGID)
13458 + fq->cgr_groupid = opts->fqd.cgid;
13459 + }
13460 + fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
13461 + qman_fq_state_sched : qman_fq_state_parked;
13462 + FQUNLOCK(fq);
13463 + PORTAL_IRQ_UNLOCK(p, irqflags);
13464 + put_affine_portal();
13465 + return 0;
13466 +}
13467 +EXPORT_SYMBOL(qman_init_fq);
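+
+/*
+ * Illustrative sketch only, not part of the SDK driver: a typical consumer
+ * allocates a dynamic FQID, attaches its dequeue callback, then initialises
+ * and schedules the FQ towards the local software portal in one step. The
+ * work queue (3) is an arbitrary example; the qman_cb_dqrr callback type is
+ * taken from include/linux/fsl_qman.h.
+ */
+static int __maybe_unused example_setup_rx_fq(struct qman_fq *fq,
+					      qman_cb_dqrr dqrr_cb)
+{
+	struct qm_mcc_initfq opts;
+	int ret;
+
+	memset(fq, 0, sizeof(*fq));
+	fq->cb.dqrr = dqrr_cb;
+	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+	if (ret)
+		return ret;
+
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = QM_INITFQ_WE_DESTWQ;
+	opts.fqd.dest.channel = qman_affine_channel(-1);
+	opts.fqd.dest.wq = 3;
+	/* INITFQ_SCHED both initialises and schedules the FQ */
+	return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+}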
13468 +
13469 +int qman_schedule_fq(struct qman_fq *fq)
13470 +{
13471 + struct qm_mc_command *mcc;
13472 + struct qm_mc_result *mcr;
13473 + struct qman_portal *p;
13474 + unsigned long irqflags __maybe_unused;
13475 + int ret = 0;
13476 + u8 res;
13477 +
13478 + if (fq->state != qman_fq_state_parked)
13479 + return -EINVAL;
13480 +#ifdef CONFIG_FSL_DPA_CHECKING
13481 + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
13482 + return -EINVAL;
13483 +#endif
13484 +	/* Issue an ALTERFQ_SCHED management command */
13485 + p = get_affine_portal();
13486 + PORTAL_IRQ_LOCK(p, irqflags);
13487 + FQLOCK(fq);
13488 + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
13489 + (fq->state != qman_fq_state_parked))) {
13490 + ret = -EBUSY;
13491 + goto out;
13492 + }
13493 + mcc = qm_mc_start(&p->p);
13494 + mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
13495 + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
13496 + while (!(mcr = qm_mc_result(&p->p)))
13497 + cpu_relax();
13498 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
13499 + res = mcr->result;
13500 + if (res != QM_MCR_RESULT_OK) {
13501 + ret = -EIO;
13502 + goto out;
13503 + }
13504 + fq->state = qman_fq_state_sched;
13505 +out:
13506 + FQUNLOCK(fq);
13507 + PORTAL_IRQ_UNLOCK(p, irqflags);
13508 + put_affine_portal();
13509 + return ret;
13510 +}
13511 +EXPORT_SYMBOL(qman_schedule_fq);
13512 +
13513 +int qman_retire_fq(struct qman_fq *fq, u32 *flags)
13514 +{
13515 + struct qm_mc_command *mcc;
13516 + struct qm_mc_result *mcr;
13517 + struct qman_portal *p;
13518 + unsigned long irqflags __maybe_unused;
13519 + int rval;
13520 + u8 res;
13521 +
13522 + if ((fq->state != qman_fq_state_parked) &&
13523 + (fq->state != qman_fq_state_sched))
13524 + return -EINVAL;
13525 +#ifdef CONFIG_FSL_DPA_CHECKING
13526 + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
13527 + return -EINVAL;
13528 +#endif
13529 + p = get_affine_portal();
13530 + PORTAL_IRQ_LOCK(p, irqflags);
13531 + FQLOCK(fq);
13532 + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
13533 + (fq->state == qman_fq_state_retired) ||
13534 + (fq->state == qman_fq_state_oos))) {
13535 + rval = -EBUSY;
13536 + goto out;
13537 + }
13538 + rval = table_push_fq(p, fq);
13539 + if (rval)
13540 + goto out;
13541 + mcc = qm_mc_start(&p->p);
13542 + mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
13543 + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
13544 + while (!(mcr = qm_mc_result(&p->p)))
13545 + cpu_relax();
13546 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
13547 + res = mcr->result;
13548 + /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
13549 + * and defer the flags until FQRNI or FQRN (respectively) show up. But
13550 + * "Friendly" is to process OK immediately, and not set CHANGING. We do
13551 + * friendly, otherwise the caller doesn't necessarily have a fully
13552 + * "retired" FQ on return even if the retirement was immediate. However
13553 + * this does mean some code duplication between here and
13554 + * fq_state_change(). */
13555 + if (likely(res == QM_MCR_RESULT_OK)) {
13556 + rval = 0;
13557 + /* Process 'fq' right away, we'll ignore FQRNI */
13558 + if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
13559 + fq_set(fq, QMAN_FQ_STATE_NE);
13560 + if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
13561 + fq_set(fq, QMAN_FQ_STATE_ORL);
13562 + else
13563 + table_del_fq(p, fq);
13564 + if (flags)
13565 + *flags = fq->flags;
13566 + fq->state = qman_fq_state_retired;
13567 + if (fq->cb.fqs) {
13568 + /* Another issue with supporting "immediate" retirement
13569 + * is that we're forced to drop FQRNIs, because by the
13570 + * time they're seen it may already be "too late" (the
13571 + * fq may have been OOS'd and free()'d already). But if
13572 + * the upper layer wants a callback whether it's
13573 +			 * immediate or not, we have to fake an "MR" entry to
13574 + * look like an FQRNI... */
13575 + struct qm_mr_entry msg;
13576 + msg.verb = QM_MR_VERB_FQRNI;
13577 + msg.fq.fqs = mcr->alterfq.fqs;
13578 + msg.fq.fqid = fq->fqid;
13579 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
13580 + msg.fq.contextB = fq->key;
13581 +#else
13582 + msg.fq.contextB = (u32)(uintptr_t)fq;
13583 +#endif
13584 + fq->cb.fqs(p, fq, &msg);
13585 + }
13586 + } else if (res == QM_MCR_RESULT_PENDING) {
13587 + rval = 1;
13588 + fq_set(fq, QMAN_FQ_STATE_CHANGING);
13589 + } else {
13590 + rval = -EIO;
13591 + table_del_fq(p, fq);
13592 + }
13593 +out:
13594 + FQUNLOCK(fq);
13595 + PORTAL_IRQ_UNLOCK(p, irqflags);
13596 + put_affine_portal();
13597 + return rval;
13598 +}
13599 +EXPORT_SYMBOL(qman_retire_fq);
13600 +
13601 +int qman_oos_fq(struct qman_fq *fq)
13602 +{
13603 + struct qm_mc_command *mcc;
13604 + struct qm_mc_result *mcr;
13605 + struct qman_portal *p;
13606 + unsigned long irqflags __maybe_unused;
13607 + int ret = 0;
13608 + u8 res;
13609 +
13610 + if (fq->state != qman_fq_state_retired)
13611 + return -EINVAL;
13612 +#ifdef CONFIG_FSL_DPA_CHECKING
13613 + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
13614 + return -EINVAL;
13615 +#endif
13616 + p = get_affine_portal();
13617 + PORTAL_IRQ_LOCK(p, irqflags);
13618 + FQLOCK(fq);
13619 + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
13620 + (fq->state != qman_fq_state_retired))) {
13621 + ret = -EBUSY;
13622 + goto out;
13623 + }
13624 + mcc = qm_mc_start(&p->p);
13625 + mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
13626 + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
13627 + while (!(mcr = qm_mc_result(&p->p)))
13628 + cpu_relax();
13629 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
13630 + res = mcr->result;
13631 + if (res != QM_MCR_RESULT_OK) {
13632 + ret = -EIO;
13633 + goto out;
13634 + }
13635 + fq->state = qman_fq_state_oos;
13636 +out:
13637 + FQUNLOCK(fq);
13638 + PORTAL_IRQ_UNLOCK(p, irqflags);
13639 + put_affine_portal();
13640 + return ret;
13641 +}
13642 +EXPORT_SYMBOL(qman_oos_fq);
13643 +
13644 +int qman_fq_flow_control(struct qman_fq *fq, int xon)
13645 +{
13646 + struct qm_mc_command *mcc;
13647 + struct qm_mc_result *mcr;
13648 + struct qman_portal *p;
13649 + unsigned long irqflags __maybe_unused;
13650 + int ret = 0;
13651 + u8 res;
13652 + u8 myverb;
13653 +
13654 + if ((fq->state == qman_fq_state_oos) ||
13655 + (fq->state == qman_fq_state_retired) ||
13656 + (fq->state == qman_fq_state_parked))
13657 + return -EINVAL;
13658 +
13659 +#ifdef CONFIG_FSL_DPA_CHECKING
13660 + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
13661 + return -EINVAL;
13662 +#endif
13663 +	/* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
13664 + p = get_affine_portal();
13665 + PORTAL_IRQ_LOCK(p, irqflags);
13666 + FQLOCK(fq);
13667 + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
13668 + (fq->state == qman_fq_state_parked) ||
13669 + (fq->state == qman_fq_state_oos) ||
13670 + (fq->state == qman_fq_state_retired))) {
13671 + ret = -EBUSY;
13672 + goto out;
13673 + }
13674 + mcc = qm_mc_start(&p->p);
13675 +	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
13676 + mcc->alterfq.count = 0;
13677 + myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
13678 +
13679 + qm_mc_commit(&p->p, myverb);
13680 + while (!(mcr = qm_mc_result(&p->p)))
13681 + cpu_relax();
13682 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
13683 +
13684 + res = mcr->result;
13685 + if (res != QM_MCR_RESULT_OK) {
13686 + ret = -EIO;
13687 + goto out;
13688 + }
13689 +out:
13690 + FQUNLOCK(fq);
13691 + PORTAL_IRQ_UNLOCK(p, irqflags);
13692 + put_affine_portal();
13693 + return ret;
13694 +}
13695 +EXPORT_SYMBOL(qman_fq_flow_control);
13696 +
13697 +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
13698 +{
13699 + struct qm_mc_command *mcc;
13700 + struct qm_mc_result *mcr;
13701 + struct qman_portal *p = get_affine_portal();
13702 + unsigned long irqflags __maybe_unused;
13703 + u8 res;
13704 +
13705 + PORTAL_IRQ_LOCK(p, irqflags);
13706 + mcc = qm_mc_start(&p->p);
13707 + mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
13708 + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
13709 + while (!(mcr = qm_mc_result(&p->p)))
13710 + cpu_relax();
13711 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
13712 + res = mcr->result;
13713 +	if (res == QM_MCR_RESULT_OK) {
13714 +		*fqd = mcr->queryfq.fqd;
13715 +		hw_fqd_to_cpu(fqd);
+	}
13716 + PORTAL_IRQ_UNLOCK(p, irqflags);
13717 + put_affine_portal();
13718 + if (res != QM_MCR_RESULT_OK)
13719 + return -EIO;
13720 + return 0;
13721 +}
13722 +EXPORT_SYMBOL(qman_query_fq);
13723 +
13724 +int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
13725 +{
13726 + struct qm_mc_command *mcc;
13727 + struct qm_mc_result *mcr;
13728 + struct qman_portal *p = get_affine_portal();
13729 + unsigned long irqflags __maybe_unused;
13730 + u8 res;
13731 +
13732 + PORTAL_IRQ_LOCK(p, irqflags);
13733 + mcc = qm_mc_start(&p->p);
13734 + mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
13735 + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
13736 + while (!(mcr = qm_mc_result(&p->p)))
13737 + cpu_relax();
13738 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
13739 + res = mcr->result;
13740 + if (res == QM_MCR_RESULT_OK) {
13741 + *np = mcr->queryfq_np;
13742 + np->fqd_link = be24_to_cpu(np->fqd_link);
13743 + np->odp_seq = be16_to_cpu(np->odp_seq);
13744 + np->orp_nesn = be16_to_cpu(np->orp_nesn);
13745 + np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
13746 + np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
13747 + np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
13748 + np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
13749 + np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
13750 + np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
13751 + np->ics_surp = be16_to_cpu(np->ics_surp);
13752 + np->byte_cnt = be32_to_cpu(np->byte_cnt);
13753 + np->frm_cnt = be24_to_cpu(np->frm_cnt);
13754 + np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
13755 + np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
13756 + np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
13757 + np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
13758 + np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
13759 + }
13760 + PORTAL_IRQ_UNLOCK(p, irqflags);
13761 + put_affine_portal();
13762 + if (res == QM_MCR_RESULT_ERR_FQID)
13763 + return -ERANGE;
13764 + else if (res != QM_MCR_RESULT_OK)
13765 + return -EIO;
13766 + return 0;
13767 +}
13768 +EXPORT_SYMBOL(qman_query_fq_np);
13769 +
13770 +int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
13771 +{
13772 + struct qm_mc_command *mcc;
13773 + struct qm_mc_result *mcr;
13774 + struct qman_portal *p = get_affine_portal();
13775 + unsigned long irqflags __maybe_unused;
13776 + u8 res, myverb;
13777 +
13778 + PORTAL_IRQ_LOCK(p, irqflags);
13779 + myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
13780 + QM_MCR_VERB_QUERYWQ;
13781 + mcc = qm_mc_start(&p->p);
13782 + mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
13783 + qm_mc_commit(&p->p, myverb);
13784 + while (!(mcr = qm_mc_result(&p->p)))
13785 + cpu_relax();
13786 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
13787 + res = mcr->result;
13788 + if (res == QM_MCR_RESULT_OK) {
13789 + int i, array_len;
13790 + wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
13791 + array_len = ARRAY_SIZE(mcr->querywq.wq_len);
13792 + for (i = 0; i < array_len; i++)
13793 + wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
13794 + }
13795 + PORTAL_IRQ_UNLOCK(p, irqflags);
13796 + put_affine_portal();
13797 + if (res != QM_MCR_RESULT_OK) {
13798 + pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
13799 + return -EIO;
13800 + }
13801 + return 0;
13802 +}
13803 +EXPORT_SYMBOL(qman_query_wq);
13804 +
13805 +int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
13806 + struct qm_mcr_cgrtestwrite *result)
13807 +{
13808 + struct qm_mc_command *mcc;
13809 + struct qm_mc_result *mcr;
13810 + struct qman_portal *p = get_affine_portal();
13811 + unsigned long irqflags __maybe_unused;
13812 + u8 res;
13813 +
13814 + PORTAL_IRQ_LOCK(p, irqflags);
13815 + mcc = qm_mc_start(&p->p);
13816 + mcc->cgrtestwrite.cgid = cgr->cgrid;
13817 + mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
13818 + mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
13819 + qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
13820 + while (!(mcr = qm_mc_result(&p->p)))
13821 + cpu_relax();
13822 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
13823 + res = mcr->result;
13824 + if (res == QM_MCR_RESULT_OK)
13825 + *result = mcr->cgrtestwrite;
13826 + PORTAL_IRQ_UNLOCK(p, irqflags);
13827 + put_affine_portal();
13828 + if (res != QM_MCR_RESULT_OK) {
13829 + pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
13830 + return -EIO;
13831 + }
13832 + return 0;
13833 +}
13834 +EXPORT_SYMBOL(qman_testwrite_cgr);
13835 +
13836 +int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
13837 +{
13838 + struct qm_mc_command *mcc;
13839 + struct qm_mc_result *mcr;
13840 + struct qman_portal *p = get_affine_portal();
13841 + unsigned long irqflags __maybe_unused;
13842 + u8 res;
13843 + int i;
13844 +
13845 + PORTAL_IRQ_LOCK(p, irqflags);
13846 + mcc = qm_mc_start(&p->p);
13847 + mcc->querycgr.cgid = cgr->cgrid;
13848 + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
13849 + while (!(mcr = qm_mc_result(&p->p)))
13850 + cpu_relax();
13851 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
13852 + res = mcr->result;
13853 + if (res == QM_MCR_RESULT_OK)
13854 + *cgrd = mcr->querycgr;
13855 + PORTAL_IRQ_UNLOCK(p, irqflags);
13856 + put_affine_portal();
13857 + if (res != QM_MCR_RESULT_OK) {
13858 + pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
13859 + return -EIO;
13860 + }
13861 + cgrd->cgr.wr_parm_g.word =
13862 + be32_to_cpu(cgrd->cgr.wr_parm_g.word);
13863 + cgrd->cgr.wr_parm_y.word =
13864 + be32_to_cpu(cgrd->cgr.wr_parm_y.word);
13865 + cgrd->cgr.wr_parm_r.word =
13866 + be32_to_cpu(cgrd->cgr.wr_parm_r.word);
13867 + cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
13868 + cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
13869 + for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
13870 + be32_to_cpus(&cgrd->cscn_targ_swp[i]);
13871 + return 0;
13872 +}
13873 +EXPORT_SYMBOL(qman_query_cgr);
13874 +
13875 +int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
13876 +{
13877 + struct qm_mc_result *mcr;
13878 + struct qman_portal *p = get_affine_portal();
13879 + unsigned long irqflags __maybe_unused;
13880 + u8 res;
13881 + int i;
13882 +
13883 + PORTAL_IRQ_LOCK(p, irqflags);
13884 + qm_mc_start(&p->p);
13885 + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
13886 + while (!(mcr = qm_mc_result(&p->p)))
13887 + cpu_relax();
13888 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
13889 + QM_MCC_VERB_QUERYCONGESTION);
13890 + res = mcr->result;
13891 + if (res == QM_MCR_RESULT_OK)
13892 + memcpy_fromio(congestion, &mcr->querycongestion,
13893 + sizeof(*congestion));
13894 + PORTAL_IRQ_UNLOCK(p, irqflags);
13895 + put_affine_portal();
13896 + if (res != QM_MCR_RESULT_OK) {
13897 + pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
13898 + return -EIO;
13899 + }
13900 +
13901 + for (i = 0; i < ARRAY_SIZE(congestion->state.__state); i++)
13902 + be32_to_cpus(&congestion->state.__state[i]);
13903 + return 0;
13904 +}
13905 +EXPORT_SYMBOL(qman_query_congestion);
13906 +
13907 +/* internal function used as a wait_event() expression */
13908 +static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
13909 +{
13910 + unsigned long irqflags __maybe_unused;
13911 + int ret = -EBUSY;
13912 + PORTAL_IRQ_LOCK(p, irqflags);
13913 + if (!p->vdqcr_owned) {
13914 + FQLOCK(fq);
13915 + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
13916 + goto escape;
13917 + fq_set(fq, QMAN_FQ_STATE_VDQCR);
13918 + FQUNLOCK(fq);
13919 + p->vdqcr_owned = fq;
13920 + ret = 0;
13921 + }
13922 +escape:
13923 + PORTAL_IRQ_UNLOCK(p, irqflags);
13924 + if (!ret)
13925 + qm_dqrr_vdqcr_set(&p->p, vdqcr);
13926 + return ret;
13927 +}
13928 +
13929 +static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
13930 +{
13931 + int ret;
13932 + *p = get_affine_portal();
13933 + ret = set_p_vdqcr(*p, fq, vdqcr);
13934 + put_affine_portal();
13935 + return ret;
13936 +}
13937 +
13938 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
13939 +static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
13940 + u32 vdqcr, u32 flags)
13941 +{
13942 + int ret = 0;
13943 + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
13944 + ret = wait_event_interruptible(affine_queue,
13945 + !(ret = set_p_vdqcr(p, fq, vdqcr)));
13946 + else
13947 + wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
13948 + return ret;
13949 +}
13950 +
13951 +static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
13952 + u32 vdqcr, u32 flags)
13953 +{
13954 + int ret = 0;
13955 + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
13956 + ret = wait_event_interruptible(affine_queue,
13957 + !(ret = set_vdqcr(p, fq, vdqcr)));
13958 + else
13959 + wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr)));
13960 + return ret;
13961 +}
13962 +#endif
13963 +
13964 +int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
13965 + u32 flags __maybe_unused, u32 vdqcr)
13966 +{
13967 + int ret;
13968 +
13969 + if ((fq->state != qman_fq_state_parked) &&
13970 + (fq->state != qman_fq_state_retired))
13971 + return -EINVAL;
13972 + if (vdqcr & QM_VDQCR_FQID_MASK)
13973 + return -EINVAL;
13974 + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
13975 + return -EBUSY;
13976 + vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
13977 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
13978 + if (flags & QMAN_VOLATILE_FLAG_WAIT)
13979 + ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
13980 + else
13981 +#endif
13982 + ret = set_p_vdqcr(p, fq, vdqcr);
13983 + if (ret)
13984 + return ret;
13985 + /* VDQCR is set */
13986 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
13987 + if (flags & QMAN_VOLATILE_FLAG_FINISH) {
13988 + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
13989 + /* NB: don't propagate any error - the caller wouldn't
13990 + * know whether the VDQCR was issued or not. A signal
13991 + * could arrive after returning anyway, so the caller
13992 + * can check signal_pending() if that's an issue. */
13993 + wait_event_interruptible(affine_queue,
13994 + !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
13995 + else
13996 + wait_event(affine_queue,
13997 + !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
13998 + }
13999 +#endif
14000 + return 0;
14001 +}
14002 +EXPORT_SYMBOL(qman_p_volatile_dequeue);
14003 +
14004 +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
14005 + u32 vdqcr)
14006 +{
14007 + struct qman_portal *p;
14008 + int ret;
14009 +
14010 + if ((fq->state != qman_fq_state_parked) &&
14011 + (fq->state != qman_fq_state_retired))
14012 + return -EINVAL;
14013 + if (vdqcr & QM_VDQCR_FQID_MASK)
14014 + return -EINVAL;
14015 + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
14016 + return -EBUSY;
14017 + vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
14018 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
14019 + if (flags & QMAN_VOLATILE_FLAG_WAIT)
14020 + ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
14021 + else
14022 +#endif
14023 + ret = set_vdqcr(&p, fq, vdqcr);
14024 + if (ret)
14025 + return ret;
14026 + /* VDQCR is set */
14027 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
14028 + if (flags & QMAN_VOLATILE_FLAG_FINISH) {
14029 + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
14030 + /* NB: don't propagate any error - the caller wouldn't
14031 + * know whether the VDQCR was issued or not. A signal
14032 + * could arrive after returning anyway, so the caller
14033 + * can check signal_pending() if that's an issue. */
14034 + wait_event_interruptible(affine_queue,
14035 + !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
14036 + else
14037 + wait_event(affine_queue,
14038 + !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
14039 + }
14040 +#endif
14041 + return 0;
14042 +}
14043 +EXPORT_SYMBOL(qman_volatile_dequeue);
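+
+/*
+ * Illustrative sketch only, not part of the SDK driver: FQ teardown follows
+ * the retire -> drain -> OOS sequence implemented above. If retirement left
+ * frames on the FQ (QMAN_FQ_STATE_NE), a volatile dequeue drains them; the
+ * FQ's dqrr callback sees (and typically drops) the drained frames while the
+ * portal is serviced. QM_VDQCR_NUMFRAMES_TILLEMPTY is assumed to come from
+ * include/linux/fsl_qman.h; error handling is deliberately minimal.
+ */
+static int __maybe_unused example_teardown_fq(struct qman_fq *fq)
+{
+	u32 state_flags = 0;
+	int ret;
+
+	ret = qman_retire_fq(fq, &state_flags);
+	if (ret < 0)
+		return ret;
+	if (ret > 0)
+		/* Retirement pending; completion arrives via fq->cb.fqs */
+		return -EBUSY;
+	if (state_flags & QMAN_FQ_STATE_NE) {
+		/* WAIT|FINISH blocks until the volatile dequeue completes */
+		ret = qman_volatile_dequeue(fq,
+				QMAN_VOLATILE_FLAG_WAIT |
+				QMAN_VOLATILE_FLAG_FINISH,
+				QM_VDQCR_NUMFRAMES_TILLEMPTY);
+		if (ret)
+			return ret;
+	}
+	ret = qman_oos_fq(fq);
+	if (ret)
+		return ret;
+	qman_destroy_fq(fq, 0);
+	return 0;
+}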
14044 +
14045 +static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
14046 +{
14047 + if (avail)
14048 + qm_eqcr_cce_prefetch(&p->p);
14049 + else
14050 + qm_eqcr_cce_update(&p->p);
14051 +}
14052 +
14053 +int qman_eqcr_is_empty(void)
14054 +{
14055 + unsigned long irqflags __maybe_unused;
14056 + struct qman_portal *p = get_affine_portal();
14057 + u8 avail;
14058 +
14059 + PORTAL_IRQ_LOCK(p, irqflags);
14060 + update_eqcr_ci(p, 0);
14061 + avail = qm_eqcr_get_fill(&p->p);
14062 + PORTAL_IRQ_UNLOCK(p, irqflags);
14063 + put_affine_portal();
14064 + return avail == 0;
14065 +}
14066 +EXPORT_SYMBOL(qman_eqcr_is_empty);
14067 +
14068 +void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
14069 +{
14070 + if (affine) {
14071 + unsigned long irqflags __maybe_unused;
14072 + struct qman_portal *p = get_affine_portal();
14073 + PORTAL_IRQ_LOCK(p, irqflags);
14074 + p->cb_dc_ern = handler;
14075 + PORTAL_IRQ_UNLOCK(p, irqflags);
14076 + put_affine_portal();
14077 + } else
14078 + cb_dc_ern = handler;
14079 +}
14080 +EXPORT_SYMBOL(qman_set_dc_ern);
14081 +
14082 +static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
14083 + unsigned long *irqflags __maybe_unused,
14084 + struct qman_fq *fq,
14085 + const struct qm_fd *fd,
14086 + u32 flags)
14087 +{
14088 + struct qm_eqcr_entry *eq;
14089 + u8 avail;
14090 + PORTAL_IRQ_LOCK(p, (*irqflags));
14091 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
14092 + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
14093 + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
14094 + if (p->eqci_owned) {
14095 + PORTAL_IRQ_UNLOCK(p, (*irqflags));
14096 + return NULL;
14097 + }
14098 + p->eqci_owned = fq;
14099 + }
14100 +#endif
14101 + if (p->use_eqcr_ci_stashing) {
14102 + /*
14103 +		 * The stashing case is easy: only update if we need to in
14104 +		 * order to try to liberate ring entries.
14105 + */
14106 + eq = qm_eqcr_start_stash(&p->p);
14107 + } else {
14108 + /*
14109 +		 * The non-stashing case is harder: we need to prefetch ahead
14110 +		 * of time.
14111 + */
14112 + avail = qm_eqcr_get_avail(&p->p);
14113 + if (avail < 2)
14114 + update_eqcr_ci(p, avail);
14115 + eq = qm_eqcr_start_no_stash(&p->p);
14116 + }
14117 +
14118 + if (unlikely(!eq)) {
14119 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
14120 + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
14121 + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
14122 + p->eqci_owned = NULL;
14123 +#endif
14124 + PORTAL_IRQ_UNLOCK(p, (*irqflags));
14125 + return NULL;
14126 + }
14127 + if (flags & QMAN_ENQUEUE_FLAG_DCA)
14128 + eq->dca = QM_EQCR_DCA_ENABLE |
14129 + ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
14130 + QM_EQCR_DCA_PARK : 0) |
14131 + ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
14132 + eq->fqid = cpu_to_be32(fq->fqid);
14133 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
14134 + eq->tag = cpu_to_be32(fq->key);
14135 +#else
14136 + eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
14137 +#endif
14138 + eq->fd = *fd;
14139 + cpu_to_hw_fd(&eq->fd);
14140 + return eq;
14141 +}
14142 +
14143 +static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
14144 + unsigned long *irqflags __maybe_unused,
14145 + struct qman_fq *fq,
14146 + const struct qm_fd *fd,
14147 + u32 flags)
14148 +{
14149 + struct qm_eqcr_entry *eq;
14150 + *p = get_affine_portal();
14151 + eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
14152 + if (!eq)
14153 + put_affine_portal();
14154 + return eq;
14155 +}
14156 +
14157 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
14158 +static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
14159 + unsigned long *irqflags __maybe_unused,
14160 + struct qman_fq *fq,
14161 + const struct qm_fd *fd,
14162 + u32 flags)
14163 +{
14164 + struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags);
14165 + if (!eq)
14166 + qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH);
14167 + return eq;
14168 +}
14169 +static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
14170 + unsigned long *irqflags __maybe_unused,
14171 + struct qman_fq *fq,
14172 + const struct qm_fd *fd,
14173 + u32 flags)
14174 +{
14175 + struct qm_eqcr_entry *eq;
14176 + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
14177 + /* NB: return NULL if signal occurs before completion. Signal
14178 + * can occur during return. Caller must check for signal */
14179 + wait_event_interruptible(affine_queue,
14180 + (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
14181 + else
14182 + wait_event(affine_queue,
14183 + (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
14184 + return eq;
14185 +}
14186 +static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
14187 + unsigned long *irqflags __maybe_unused,
14188 + struct qman_fq *fq,
14189 + const struct qm_fd *fd,
14190 + u32 flags)
14191 +{
14192 + struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
14193 + if (!eq)
14194 + qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
14195 + return eq;
14196 +}
14197 +static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
14198 + unsigned long *irqflags __maybe_unused,
14199 + struct qman_fq *fq,
14200 + const struct qm_fd *fd,
14201 + u32 flags)
14202 +{
14203 + struct qm_eqcr_entry *eq;
14204 + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
14205 + /* NB: return NULL if signal occurs before completion. Signal
14206 + * can occur during return. Caller must check for signal */
14207 + wait_event_interruptible(affine_queue,
14208 + (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
14209 + else
14210 + wait_event(affine_queue,
14211 + (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
14212 + return eq;
14213 +}
14214 +#endif
14215 +
14216 +int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
14217 + const struct qm_fd *fd, u32 flags)
14218 +{
14219 + struct qm_eqcr_entry *eq;
14220 + unsigned long irqflags __maybe_unused;
14221 +
14222 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
14223 + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
14224 + eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
14225 + else
14226 +#endif
14227 + eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
14228 + if (!eq)
14229 + return -EBUSY;
14230 + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
14231 + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
14232 + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
14233 + /* Factor the below out, it's used from qman_enqueue_orp() too */
14234 + PORTAL_IRQ_UNLOCK(p, irqflags);
14235 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
14236 + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
14237 + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
14238 + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
14239 + /* NB: return success even if signal occurs before
14240 + * condition is true. pvb_commit guarantees success */
14241 + wait_event_interruptible(affine_queue,
14242 + (p->eqci_owned != fq));
14243 + else
14244 + wait_event(affine_queue, (p->eqci_owned != fq));
14245 + }
14246 +#endif
14247 + return 0;
14248 +}
14249 +EXPORT_SYMBOL(qman_p_enqueue);
14250 +
14251 +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
14252 +{
14253 + struct qman_portal *p;
14254 + struct qm_eqcr_entry *eq;
14255 + unsigned long irqflags __maybe_unused;
14256 +
14257 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
14258 + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
14259 + eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
14260 + else
14261 +#endif
14262 + eq = try_eq_start(&p, &irqflags, fq, fd, flags);
14263 + if (!eq)
14264 + return -EBUSY;
14265 + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
14266 + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
14267 + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
14268 + /* Factor the below out, it's used from qman_enqueue_orp() too */
14269 + PORTAL_IRQ_UNLOCK(p, irqflags);
14270 + put_affine_portal();
14271 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
14272 + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
14273 + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
14274 + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
14275 + /* NB: return success even if signal occurs before
14276 + * condition is true. pvb_commit guarantees success */
14277 + wait_event_interruptible(affine_queue,
14278 + (p->eqci_owned != fq));
14279 + else
14280 + wait_event(affine_queue, (p->eqci_owned != fq));
14281 + }
14282 +#endif
14283 + return 0;
14284 +}
14285 +EXPORT_SYMBOL(qman_enqueue);
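+
+/*
+ * Illustrative sketch only, not part of the SDK driver: without
+ * QMAN_ENQUEUE_FLAG_WAIT, qman_enqueue() returns -EBUSY when no EQCR entry
+ * is available, so non-blocking callers typically retry. The retry bound is
+ * an arbitrary example; the frame descriptor is assumed to have been
+ * prepared by the caller.
+ */
+static int __maybe_unused example_enqueue_retry(struct qman_fq *fq,
+						const struct qm_fd *fd)
+{
+	int retries = 1000;
+	int ret;
+
+	do {
+		ret = qman_enqueue(fq, fd, 0);
+		if (ret != -EBUSY)
+			return ret;
+		cpu_relax();
+	} while (--retries);
+	return -EBUSY;
+}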
14286 +
14287 +int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
14288 + const struct qm_fd *fd, u32 flags,
14289 + struct qman_fq *orp, u16 orp_seqnum)
14290 +{
14291 + struct qm_eqcr_entry *eq;
14292 + unsigned long irqflags __maybe_unused;
14293 +
14294 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
14295 + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
14296 + eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
14297 + else
14298 +#endif
14299 + eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
14300 + if (!eq)
14301 + return -EBUSY;
14302 + /* Process ORP-specifics here */
14303 + if (flags & QMAN_ENQUEUE_FLAG_NLIS)
14304 + orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
14305 + else {
14306 + orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
14307 + if (flags & QMAN_ENQUEUE_FLAG_NESN)
14308 + orp_seqnum |= QM_EQCR_SEQNUM_NESN;
14309 + else
14310 +			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
14311 + orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
14312 + }
14313 + eq->seqnum = cpu_to_be16(orp_seqnum);
14314 + eq->orp = cpu_to_be32(orp->fqid);
14315 + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
14316 + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
14317 + ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
14318 + 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
14319 + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
14320 + PORTAL_IRQ_UNLOCK(p, irqflags);
14321 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
14322 + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
14323 + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
14324 + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
14325 + /* NB: return success even if signal occurs before
14326 + * condition is true. pvb_commit guarantees success */
14327 + wait_event_interruptible(affine_queue,
14328 + (p->eqci_owned != fq));
14329 + else
14330 + wait_event(affine_queue, (p->eqci_owned != fq));
14331 + }
14332 +#endif
14333 + return 0;
14334 +}
14335 +EXPORT_SYMBOL(qman_p_enqueue_orp);
14336 +
14337 +int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
14338 + struct qman_fq *orp, u16 orp_seqnum)
14339 +{
14340 + struct qman_portal *p;
14341 + struct qm_eqcr_entry *eq;
14342 + unsigned long irqflags __maybe_unused;
14343 +
14344 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
14345 + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
14346 + eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
14347 + else
14348 +#endif
14349 + eq = try_eq_start(&p, &irqflags, fq, fd, flags);
14350 + if (!eq)
14351 + return -EBUSY;
14352 + /* Process ORP-specifics here */
14353 + if (flags & QMAN_ENQUEUE_FLAG_NLIS)
14354 + orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
14355 + else {
14356 + orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
14357 + if (flags & QMAN_ENQUEUE_FLAG_NESN)
14358 + orp_seqnum |= QM_EQCR_SEQNUM_NESN;
14359 + else
14360 +			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
14361 + orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
14362 + }
14363 + eq->seqnum = cpu_to_be16(orp_seqnum);
14364 + eq->orp = cpu_to_be32(orp->fqid);
14365 + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
14366 + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
14367 + ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
14368 + 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
14369 + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
14370 + PORTAL_IRQ_UNLOCK(p, irqflags);
14371 + put_affine_portal();
14372 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
14373 + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
14374 + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
14375 + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
14376 + /* NB: return success even if signal occurs before
14377 + * condition is true. pvb_commit guarantees success */
14378 + wait_event_interruptible(affine_queue,
14379 + (p->eqci_owned != fq));
14380 + else
14381 + wait_event(affine_queue, (p->eqci_owned != fq));
14382 + }
14383 +#endif
14384 + return 0;
14385 +}
14386 +EXPORT_SYMBOL(qman_enqueue_orp);
14387 +
14388 +int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
14389 + const struct qm_fd *fd, u32 flags,
14390 + qman_cb_precommit cb, void *cb_arg)
14391 +{
14392 + struct qm_eqcr_entry *eq;
14393 + unsigned long irqflags __maybe_unused;
14394 +
14395 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
14396 + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
14397 + eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
14398 + else
14399 +#endif
14400 + eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
14401 + if (!eq)
14402 + return -EBUSY;
14403 + /* invoke user supplied callback function before writing commit verb */
14404 + if (cb(cb_arg)) {
14405 + PORTAL_IRQ_UNLOCK(p, irqflags);
14406 + return -EINVAL;
14407 + }
14408 + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
14409 + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
14410 + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
14411 + /* Factor the below out, it's used from qman_enqueue_orp() too */
14412 + PORTAL_IRQ_UNLOCK(p, irqflags);
14413 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
14414 + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
14415 + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
14416 + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
14417 + /* NB: return success even if signal occurs before
14418 + * condition is true. pvb_commit guarantees success */
14419 + wait_event_interruptible(affine_queue,
14420 + (p->eqci_owned != fq));
14421 + else
14422 + wait_event(affine_queue, (p->eqci_owned != fq));
14423 + }
14424 +#endif
14425 + return 0;
14426 +}
14427 +EXPORT_SYMBOL(qman_p_enqueue_precommit);
14428 +
14429 +int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
14430 + u32 flags, qman_cb_precommit cb, void *cb_arg)
14431 +{
14432 + struct qman_portal *p;
14433 + struct qm_eqcr_entry *eq;
14434 + unsigned long irqflags __maybe_unused;
14435 +
14436 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
14437 + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
14438 + eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
14439 + else
14440 +#endif
14441 + eq = try_eq_start(&p, &irqflags, fq, fd, flags);
14442 + if (!eq)
14443 + return -EBUSY;
14444 + /* invoke user supplied callback function before writing commit verb */
14445 + if (cb(cb_arg)) {
14446 + PORTAL_IRQ_UNLOCK(p, irqflags);
14447 + put_affine_portal();
14448 + return -EINVAL;
14449 + }
14450 + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
14451 + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
14452 + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
14453 + /* Factor the below out, it's used from qman_enqueue_orp() too */
14454 + PORTAL_IRQ_UNLOCK(p, irqflags);
14455 + put_affine_portal();
14456 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
14457 + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
14458 + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
14459 + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
14460 + /* NB: return success even if signal occurs before
14461 + * condition is true. pvb_commit guarantees success */
14462 + wait_event_interruptible(affine_queue,
14463 + (p->eqci_owned != fq));
14464 + else
14465 + wait_event(affine_queue, (p->eqci_owned != fq));
14466 + }
14467 +#endif
14468 + return 0;
14469 +}
14470 +EXPORT_SYMBOL(qman_enqueue_precommit);
14471 +
14472 +int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
14473 + struct qm_mcc_initcgr *opts)
14474 +{
14475 + struct qm_mc_command *mcc;
14476 + struct qm_mc_result *mcr;
14477 + struct qman_portal *p = get_affine_portal();
14478 + unsigned long irqflags __maybe_unused;
14479 + u8 res;
14480 + u8 verb = QM_MCC_VERB_MODIFYCGR;
14481 +
14482 + PORTAL_IRQ_LOCK(p, irqflags);
14483 + mcc = qm_mc_start(&p->p);
14484 + if (opts)
14485 + mcc->initcgr = *opts;
14486 + mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
14487 + mcc->initcgr.cgr.wr_parm_g.word =
14488 + cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
14489 + mcc->initcgr.cgr.wr_parm_y.word =
14490 + cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
14491 + mcc->initcgr.cgr.wr_parm_r.word =
14492 + cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
14493 + mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
14494 + mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
14495 +
14496 + mcc->initcgr.cgid = cgr->cgrid;
14497 + if (flags & QMAN_CGR_FLAG_USE_INIT)
14498 + verb = QM_MCC_VERB_INITCGR;
14499 + qm_mc_commit(&p->p, verb);
14500 + while (!(mcr = qm_mc_result(&p->p)))
14501 + cpu_relax();
14502 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
14503 + res = mcr->result;
14504 + PORTAL_IRQ_UNLOCK(p, irqflags);
14505 + put_affine_portal();
14506 + return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
14507 +}
14508 +EXPORT_SYMBOL(qman_modify_cgr);
14509 +
14510 +#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
14511 + QM_CHANNEL_SWPORTAL0))
14512 +#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
14513 +#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
14514 +
14515 +static u8 qman_cgr_cpus[__CGR_NUM];
14516 +
14517 +int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
14518 + struct qm_mcc_initcgr *opts)
14519 +{
14520 + unsigned long irqflags __maybe_unused;
14521 + struct qm_mcr_querycgr cgr_state;
14522 + struct qm_mcc_initcgr local_opts;
14523 + int ret;
14524 + struct qman_portal *p;
14525 +
14526 + /* We have to check that the provided CGRID is within the limits of the
14527 + * data-structures, for obvious reasons. However we'll let h/w take
14528 + * care of determining whether it's within the limits of what exists on
14529 + * the SoC. */
14530 + if (cgr->cgrid >= __CGR_NUM)
14531 + return -EINVAL;
14532 +
14533 + preempt_disable();
14534 + p = get_affine_portal();
14535 + qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
14536 + preempt_enable();
14537 +
14538 + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
14539 + cgr->chan = p->config->public_cfg.channel;
14540 + spin_lock_irqsave(&p->cgr_lock, irqflags);
14541 +
14542 + /* if no opts specified, just add it to the list */
14543 + if (!opts)
14544 + goto add_list;
14545 +
14546 + ret = qman_query_cgr(cgr, &cgr_state);
14547 + if (ret)
14548 + goto release_lock;
14549 + if (opts)
14550 + local_opts = *opts;
14551 + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
14552 + local_opts.cgr.cscn_targ_upd_ctrl =
14553 + QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
14554 + else
14555 + /* Overwrite TARG */
14556 + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
14557 + TARG_MASK(p);
14558 + local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
14559 +
14560 + /* send init if flags indicate so */
14561 + if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
14562 + ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
14563 + else
14564 + ret = qman_modify_cgr(cgr, 0, &local_opts);
14565 + if (ret)
14566 + goto release_lock;
14567 +add_list:
14568 + list_add(&cgr->node, &p->cgr_cbs);
14569 +
14570 + /* Determine if newly added object requires its callback to be called */
14571 + ret = qman_query_cgr(cgr, &cgr_state);
14572 + if (ret) {
14573 +		/* we can't go back, so proceed and return success, but scream
14574 +		 * and wail to the log file */
14575 + pr_crit("CGR HW state partially modified\n");
14576 + ret = 0;
14577 + goto release_lock;
14578 + }
14579 + if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
14580 + cgr->cgrid))
14581 + cgr->cb(p, cgr, 1);
14582 +release_lock:
14583 + spin_unlock_irqrestore(&p->cgr_lock, irqflags);
14584 + put_affine_portal();
14585 + return ret;
14586 +}
14587 +EXPORT_SYMBOL(qman_create_cgr);
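+/*
+ * Minimal usage sketch (a hypothetical caller; flag and field names other
+ * than those used above are assumed from include/linux/fsl_qman.h):
+ *
+ *	static void my_cscn_cb(struct qman_portal *qm, struct qman_cgr *c,
+ *			       int congested)
+ *	{
+ *		pr_info("CGR %d congestion state %d\n", c->cgrid, congested);
+ *	}
+ *
+ *	struct qman_cgr cgr = { .cgrid = 10, .cb = my_cscn_cb };
+ *	struct qm_mcc_initcgr opts = { .we_mask = QM_CGR_WE_CSCN_EN,
+ *				       .cgr.cscn_en = QM_CGR_EN };
+ *	ret = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ */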
14588 +
14589 +int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
14590 + struct qm_mcc_initcgr *opts)
14591 +{
14592 + unsigned long irqflags __maybe_unused;
14593 + struct qm_mcc_initcgr local_opts;
14594 + struct qm_mcr_querycgr cgr_state;
14595 + int ret;
14596 +
14597 + if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
14598 +		pr_warn("This QMan version doesn't support sending CSCN to the DCP portal\n");
14599 + return -EINVAL;
14600 + }
14601 + /* We have to check that the provided CGRID is within the limits of the
14602 + * data-structures, for obvious reasons. However we'll let h/w take
14603 + * care of determining whether it's within the limits of what exists on
14604 + * the SoC.
14605 + */
14606 + if (cgr->cgrid >= __CGR_NUM)
14607 + return -EINVAL;
14608 +
14609 + ret = qman_query_cgr(cgr, &cgr_state);
14610 + if (ret)
14611 + return ret;
14612 +
14613 + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
14614 + if (opts)
14615 + local_opts = *opts;
14616 +
14617 + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
14618 + local_opts.cgr.cscn_targ_upd_ctrl =
14619 + QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
14620 + QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
14621 + else
14622 + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
14623 + TARG_DCP_MASK(dcp_portal);
14624 + local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
14625 +
14626 + /* send init if flags indicate so */
14627 + if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
14628 + ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
14629 + &local_opts);
14630 + else
14631 + ret = qman_modify_cgr(cgr, 0, &local_opts);
14632 +
14633 + return ret;
14634 +}
14635 +EXPORT_SYMBOL(qman_create_cgr_to_dcp);
14636 +
14637 +int qman_delete_cgr(struct qman_cgr *cgr)
14638 +{
14639 + unsigned long irqflags __maybe_unused;
14640 + struct qm_mcr_querycgr cgr_state;
14641 + struct qm_mcc_initcgr local_opts;
14642 + int ret = 0;
14643 + struct qman_cgr *i;
14644 + struct qman_portal *p = get_affine_portal();
14645 +
14646 + if (cgr->chan != p->config->public_cfg.channel) {
14647 +		pr_crit("Attempting to delete cgr from a different portal "
14648 +			"than it was created on: create 0x%x, delete 0x%x\n",
14649 + cgr->chan, p->config->public_cfg.channel);
14650 + ret = -EINVAL;
14651 + goto put_portal;
14652 + }
14653 + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
14654 + spin_lock_irqsave(&p->cgr_lock, irqflags);
14655 + list_del(&cgr->node);
14656 + /*
14657 + * If there are no other CGR objects for this CGRID in the list, update
14658 + * CSCN_TARG accordingly
14659 + */
14660 + list_for_each_entry(i, &p->cgr_cbs, node)
14661 + if ((i->cgrid == cgr->cgrid) && i->cb)
14662 + goto release_lock;
14663 + ret = qman_query_cgr(cgr, &cgr_state);
14664 + if (ret) {
14665 + /* add back to the list */
14666 + list_add(&cgr->node, &p->cgr_cbs);
14667 + goto release_lock;
14668 + }
14669 + /* Overwrite TARG */
14670 + local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
14671 + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
14672 + local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
14673 + else
14674 + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
14675 + ~(TARG_MASK(p));
14676 + ret = qman_modify_cgr(cgr, 0, &local_opts);
14677 + if (ret)
14678 + /* add back to the list */
14679 + list_add(&cgr->node, &p->cgr_cbs);
14680 +release_lock:
14681 + spin_unlock_irqrestore(&p->cgr_lock, irqflags);
14682 +put_portal:
14683 + put_affine_portal();
14684 + return ret;
14685 +}
14686 +EXPORT_SYMBOL(qman_delete_cgr);
14687 +
14688 +struct cgr_comp {
14689 + struct qman_cgr *cgr;
14690 + struct completion completion;
14691 +};
14692 +
14693 +static int qman_delete_cgr_thread(void *p)
14694 +{
14695 + struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
14696 + int res;
14697 +
14698 + res = qman_delete_cgr((struct qman_cgr *)cgr_comp->cgr);
14699 + complete(&cgr_comp->completion);
14700 +
14701 + return res;
14702 +}
14703 +
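+/*
+ * qman_delete_cgr() must run on the CPU whose affine portal the CGR was
+ * created on (recorded in qman_cgr_cpus[] by qman_create_cgr()). When called
+ * from any other CPU, this helper spawns a kthread, binds it to the owning
+ * CPU and waits for the deletion to complete there.
+ */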
14704 +void qman_delete_cgr_safe(struct qman_cgr *cgr)
14705 +{
14706 + struct task_struct *thread;
14707 + struct cgr_comp cgr_comp;
14708 +
14709 + preempt_disable();
14710 + if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
14711 + init_completion(&cgr_comp.completion);
14712 + cgr_comp.cgr = cgr;
14713 + thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
14714 + "cgr_del");
14715 +
14716 + if (likely(!IS_ERR(thread))) {
14717 + kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
14718 + wake_up_process(thread);
14719 + wait_for_completion(&cgr_comp.completion);
14720 + preempt_enable();
14721 + return;
14722 + }
14723 + }
14724 + qman_delete_cgr(cgr);
14725 + preempt_enable();
14726 +}
14727 +EXPORT_SYMBOL(qman_delete_cgr_safe);
14728 +
14729 +int qm_get_clock(u64 *clock_hz)
14730 +{
14731 + if (!qman_clk) {
14732 +		pr_warn("QMan clock speed is unknown\n");
14733 + return -EINVAL;
14734 + }
14735 + *clock_hz = (u64)qman_clk;
14736 + return 0;
14737 +}
14738 +EXPORT_SYMBOL(qm_get_clock);
14739 +
14740 +int qm_set_clock(u64 clock_hz)
14741 +{
14742 + if (qman_clk)
14743 + return -1;
14744 + qman_clk = (u32)clock_hz;
14745 + return 0;
14746 +}
14747 +EXPORT_SYMBOL(qm_set_clock);
14748 +
14749 +/* CEETM management command */
14750 +static int qman_ceetm_configure_lfqmt(struct qm_mcc_ceetm_lfqmt_config *opts)
14751 +{
14752 + struct qm_mc_command *mcc;
14753 + struct qm_mc_result *mcr;
14754 + struct qman_portal *p;
14755 + unsigned long irqflags __maybe_unused;
14756 + u8 res;
14757 +
14758 + p = get_affine_portal();
14759 + PORTAL_IRQ_LOCK(p, irqflags);
14760 +
14761 + mcc = qm_mc_start(&p->p);
14762 + mcc->lfqmt_config = *opts;
14763 + qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_CONFIG);
14764 + while (!(mcr = qm_mc_result(&p->p)))
14765 + cpu_relax();
14766 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
14767 + QM_CEETM_VERB_LFQMT_CONFIG);
14768 + PORTAL_IRQ_UNLOCK(p, irqflags);
14769 + put_affine_portal();
14770 +
14771 + res = mcr->result;
14772 + if (res != QM_MCR_RESULT_OK) {
14773 + pr_err("CEETM: CONFIGURE LFQMT failed\n");
14774 + return -EIO;
14775 + }
14776 + return 0;
14777 +}
14778 +
14779 +int qman_ceetm_query_lfqmt(int lfqid,
14780 + struct qm_mcr_ceetm_lfqmt_query *lfqmt_query)
14781 +{
14782 + struct qm_mc_command *mcc;
14783 + struct qm_mc_result *mcr;
14784 + struct qman_portal *p;
14785 + unsigned long irqflags __maybe_unused;
14786 + u8 res;
14787 +
14788 + p = get_affine_portal();
14789 + PORTAL_IRQ_LOCK(p, irqflags);
14790 +
14791 + mcc = qm_mc_start(&p->p);
14792 + mcc->lfqmt_query.lfqid = lfqid;
14793 + qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_QUERY);
14794 + while (!(mcr = qm_mc_result(&p->p)))
14795 + cpu_relax();
14796 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_LFQMT_QUERY);
14797 + res = mcr->result;
14798 + if (res == QM_MCR_RESULT_OK)
14799 + *lfqmt_query = mcr->lfqmt_query;
14800 +
14801 + PORTAL_IRQ_UNLOCK(p, irqflags);
14802 + put_affine_portal();
14803 + if (res != QM_MCR_RESULT_OK) {
14804 + pr_err("CEETM: QUERY LFQMT failed\n");
14805 + return -EIO;
14806 + }
14807 + return 0;
14808 +}
14809 +EXPORT_SYMBOL(qman_ceetm_query_lfqmt);
14810 +
14811 +static int qman_ceetm_configure_cq(struct qm_mcc_ceetm_cq_config *opts)
14812 +{
14813 + struct qm_mc_command *mcc;
14814 + struct qm_mc_result *mcr;
14815 + struct qman_portal *p;
14816 + unsigned long irqflags __maybe_unused;
14817 + u8 res;
14818 +
14819 + p = get_affine_portal();
14820 + PORTAL_IRQ_LOCK(p, irqflags);
14821 +
14822 + mcc = qm_mc_start(&p->p);
14823 + mcc->cq_config = *opts;
14824 + qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_CONFIG);
14825 + while (!(mcr = qm_mc_result(&p->p)))
14826 + cpu_relax();
14827 + res = mcr->result;
14828 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_CONFIG);
14829 +
14830 + PORTAL_IRQ_UNLOCK(p, irqflags);
14831 + put_affine_portal();
14832 +
14833 + if (res != QM_MCR_RESULT_OK) {
14834 + pr_err("CEETM: CONFIGURE CQ failed\n");
14835 + return -EIO;
14836 + }
14837 + return 0;
14838 +}
14839 +
14840 +int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
14841 + struct qm_mcr_ceetm_cq_query *cq_query)
14842 +{
14843 + struct qm_mc_command *mcc;
14844 + struct qm_mc_result *mcr;
14845 + struct qman_portal *p;
14846 + unsigned long irqflags __maybe_unused;
14847 + u8 res;
14848 +
14849 + p = get_affine_portal();
14850 + PORTAL_IRQ_LOCK(p, irqflags);
14851 +
14852 + mcc = qm_mc_start(&p->p);
14853 + mcc->cq_query.cqid = cpu_to_be16(cqid);
14854 + mcc->cq_query.dcpid = dcpid;
14855 + qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_QUERY);
14856 + while (!(mcr = qm_mc_result(&p->p)))
14857 + cpu_relax();
14858 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_QUERY);
14859 + res = mcr->result;
14860 + if (res == QM_MCR_RESULT_OK) {
14861 + *cq_query = mcr->cq_query;
14862 + hw_cq_query_to_cpu(cq_query);
14863 + }
14864 +
14865 + PORTAL_IRQ_UNLOCK(p, irqflags);
14866 + put_affine_portal();
14867 +
14868 + if (res != QM_MCR_RESULT_OK) {
14869 + pr_err("CEETM: QUERY CQ failed\n");
14870 + return -EIO;
14871 + }
14872 +
14873 + return 0;
14874 +}
14875 +EXPORT_SYMBOL(qman_ceetm_query_cq);
14876 +
14877 +static int qman_ceetm_configure_dct(struct qm_mcc_ceetm_dct_config *opts)
14878 +{
14879 + struct qm_mc_command *mcc;
14880 + struct qm_mc_result *mcr;
14881 + struct qman_portal *p;
14882 + unsigned long irqflags __maybe_unused;
14883 + u8 res;
14884 +
14885 + p = get_affine_portal();
14886 + PORTAL_IRQ_LOCK(p, irqflags);
14887 +
14888 + mcc = qm_mc_start(&p->p);
14889 + mcc->dct_config = *opts;
14890 + qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_CONFIG);
14891 + while (!(mcr = qm_mc_result(&p->p)))
14892 + cpu_relax();
14893 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_CONFIG);
14894 + res = mcr->result;
14895 +
14896 + PORTAL_IRQ_UNLOCK(p, irqflags);
14897 + put_affine_portal();
14898 +
14899 + if (res != QM_MCR_RESULT_OK) {
14900 + pr_err("CEETM: CONFIGURE DCT failed\n");
14901 + return -EIO;
14902 + }
14903 + return 0;
14904 +}
14905 +
14906 +static int qman_ceetm_query_dct(struct qm_mcc_ceetm_dct_query *opts,
14907 + struct qm_mcr_ceetm_dct_query *dct_query)
14908 +{
14909 + struct qm_mc_command *mcc;
14910 + struct qm_mc_result *mcr;
14911 + struct qman_portal *p = get_affine_portal();
14912 + unsigned long irqflags __maybe_unused;
14913 + u8 res;
14914 +
14915 + PORTAL_IRQ_LOCK(p, irqflags);
14916 +
14917 + mcc = qm_mc_start(&p->p);
14918 + mcc->dct_query = *opts;
14919 + qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_QUERY);
14920 + while (!(mcr = qm_mc_result(&p->p)))
14921 + cpu_relax();
14922 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_QUERY);
14923 + res = mcr->result;
14924 +
14925 + PORTAL_IRQ_UNLOCK(p, irqflags);
14926 + put_affine_portal();
14927 +
14928 + if (res != QM_MCR_RESULT_OK) {
14929 + pr_err("CEETM: QUERY DCT failed\n");
14930 + return -EIO;
14931 + }
14932 +
14933 + *dct_query = mcr->dct_query;
14934 + return 0;
14935 +}
14936 +
14937 +static int qman_ceetm_configure_class_scheduler(
14938 + struct qm_mcc_ceetm_class_scheduler_config *opts)
14939 +{
14940 + struct qm_mc_command *mcc;
14941 + struct qm_mc_result *mcr;
14942 + struct qman_portal *p;
14943 + unsigned long irqflags __maybe_unused;
14944 + u8 res;
14945 +
14946 + p = get_affine_portal();
14947 + PORTAL_IRQ_LOCK(p, irqflags);
14948 +
14949 + mcc = qm_mc_start(&p->p);
14950 + mcc->csch_config = *opts;
14951 + qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
14952 + while (!(mcr = qm_mc_result(&p->p)))
14953 + cpu_relax();
14954 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
14955 + QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
14956 + res = mcr->result;
14957 +
14958 + PORTAL_IRQ_UNLOCK(p, irqflags);
14959 + put_affine_portal();
14960 +
14961 + if (res != QM_MCR_RESULT_OK) {
14962 + pr_err("CEETM: CONFIGURE CLASS SCHEDULER failed\n");
14963 + return -EIO;
14964 + }
14965 + return 0;
14966 +}
14967 +
14968 +static int qman_ceetm_query_class_scheduler(struct qm_ceetm_channel *channel,
14969 + struct qm_mcr_ceetm_class_scheduler_query *query)
14970 +{
14971 + struct qm_mc_command *mcc;
14972 + struct qm_mc_result *mcr;
14973 + struct qman_portal *p;
14974 + unsigned long irqflags __maybe_unused;
14975 + u8 res;
14976 +
14977 + p = get_affine_portal();
14978 + PORTAL_IRQ_LOCK(p, irqflags);
14979 +
14980 + mcc = qm_mc_start(&p->p);
14981 + mcc->csch_query.cqcid = cpu_to_be16(channel->idx);
14982 + mcc->csch_query.dcpid = channel->dcp_idx;
14983 + qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
14984 + while (!(mcr = qm_mc_result(&p->p)))
14985 + cpu_relax();
14986 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
14987 + QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
14988 + res = mcr->result;
14989 +
14990 + PORTAL_IRQ_UNLOCK(p, irqflags);
14991 + put_affine_portal();
14992 +
14993 + if (res != QM_MCR_RESULT_OK) {
14994 + pr_err("CEETM: QUERY CLASS SCHEDULER failed\n");
14995 + return -EIO;
14996 + }
14997 + *query = mcr->csch_query;
14998 + return 0;
14999 +}
15000 +
15001 +static int qman_ceetm_configure_mapping_shaper_tcfc(
15002 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config *opts)
15003 +{
15004 + struct qm_mc_command *mcc;
15005 + struct qm_mc_result *mcr;
15006 + struct qman_portal *p;
15007 + unsigned long irqflags __maybe_unused;
15008 + u8 res;
15009 +
15010 + p = get_affine_portal();
15011 + PORTAL_IRQ_LOCK(p, irqflags);
15012 +
15013 + mcc = qm_mc_start(&p->p);
15014 + mcc->mst_config = *opts;
15015 + qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
15016 + while (!(mcr = qm_mc_result(&p->p)))
15017 + cpu_relax();
15018 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
15019 + QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
15020 + res = mcr->result;
15021 +
15022 + PORTAL_IRQ_UNLOCK(p, irqflags);
15023 + put_affine_portal();
15024 +
15025 + if (res != QM_MCR_RESULT_OK) {
15026 + pr_err("CEETM: CONFIGURE CHANNEL MAPPING failed\n");
15027 + return -EIO;
15028 + }
15029 + return 0;
15030 +}
15031 +
15032 +static int qman_ceetm_query_mapping_shaper_tcfc(
15033 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query *opts,
15034 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query *response)
15035 +{
15036 + struct qm_mc_command *mcc;
15037 + struct qm_mc_result *mcr;
15038 + struct qman_portal *p;
15039 + unsigned long irqflags __maybe_unused;
15040 + u8 res;
15041 +
15042 + p = get_affine_portal();
15043 + PORTAL_IRQ_LOCK(p, irqflags);
15044 +
15045 + mcc = qm_mc_start(&p->p);
15046 + mcc->mst_query = *opts;
15047 + qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
15048 + while (!(mcr = qm_mc_result(&p->p)))
15049 + cpu_relax();
15050 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
15051 + QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
15052 + res = mcr->result;
15053 +
15054 + PORTAL_IRQ_UNLOCK(p, irqflags);
15055 + put_affine_portal();
15056 +
15057 + if (res != QM_MCR_RESULT_OK) {
15058 + pr_err("CEETM: QUERY CHANNEL MAPPING failed\n");
15059 + return -EIO;
15060 + }
15061 +
15062 + *response = mcr->mst_query;
15063 + return 0;
15064 +}
15065 +
15066 +static int qman_ceetm_configure_ccgr(struct qm_mcc_ceetm_ccgr_config *opts)
15067 +{
15068 + struct qm_mc_command *mcc;
15069 + struct qm_mc_result *mcr;
15070 + struct qman_portal *p;
15071 + unsigned long irqflags __maybe_unused;
15072 + u8 res;
15073 +
15074 + p = get_affine_portal();
15075 + PORTAL_IRQ_LOCK(p, irqflags);
15076 +
15077 + mcc = qm_mc_start(&p->p);
15078 + mcc->ccgr_config = *opts;
15079 +
15080 + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_CONFIG);
15081 + while (!(mcr = qm_mc_result(&p->p)))
15082 + cpu_relax();
15083 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_CONFIG);
15084 +
15085 + PORTAL_IRQ_UNLOCK(p, irqflags);
15086 + put_affine_portal();
15087 +
15088 + res = mcr->result;
15089 + if (res != QM_MCR_RESULT_OK) {
15090 + pr_err("CEETM: CONFIGURE CCGR failed\n");
15091 + return -EIO;
15092 + }
15093 + return 0;
15094 +}
15095 +
15096 +int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
15097 + struct qm_mcr_ceetm_ccgr_query *response)
15098 +{
15099 + struct qm_mc_command *mcc;
15100 + struct qm_mc_result *mcr;
15101 + struct qman_portal *p;
15102 + unsigned long irqflags __maybe_unused;
15103 + u8 res;
15104 +
15105 + p = get_affine_portal();
15106 + PORTAL_IRQ_LOCK(p, irqflags);
15107 +
15108 + mcc = qm_mc_start(&p->p);
15109 + mcc->ccgr_query.ccgrid = cpu_to_be16(ccgr_query->ccgrid);
15110 + mcc->ccgr_query.dcpid = ccgr_query->dcpid;
15111 + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
15112 +
15113 + while (!(mcr = qm_mc_result(&p->p)))
15114 + cpu_relax();
15115 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_QUERY);
15116 + res = mcr->result;
15117 + if (res == QM_MCR_RESULT_OK) {
15118 + *response = mcr->ccgr_query;
15119 + hw_ccgr_query_to_cpu(response);
15120 + }
15121 +
15122 + PORTAL_IRQ_UNLOCK(p, irqflags);
15123 + put_affine_portal();
15124 + if (res != QM_MCR_RESULT_OK) {
15125 + pr_err("CEETM: QUERY CCGR failed\n");
15126 + return -EIO;
15127 + }
15128 + return 0;
15129 +}
15130 +EXPORT_SYMBOL(qman_ceetm_query_ccgr);
15131 +
15132 +static int qman_ceetm_cq_peek_pop_xsfdrread(struct qm_ceetm_cq *cq,
15133 + u8 command_type, u16 xsfdr,
15134 + struct qm_mcr_ceetm_cq_peek_pop_xsfdrread *cq_ppxr)
15135 +{
15136 + struct qm_mc_command *mcc;
15137 + struct qm_mc_result *mcr;
15138 + struct qman_portal *p;
15139 + unsigned long irqflags __maybe_unused;
15140 + u8 res;
15141 +
15142 + p = get_affine_portal();
15143 + PORTAL_IRQ_LOCK(p, irqflags);
15144 +
15145 + mcc = qm_mc_start(&p->p);
15146 + switch (command_type) {
15147 + case 0:
15148 + case 1:
15149 + mcc->cq_ppxr.cqid = (cq->parent->idx << 4) | cq->idx;
15150 + break;
15151 + case 2:
15152 + mcc->cq_ppxr.xsfdr = xsfdr;
15153 + break;
15154 + default:
15155 + break;
15156 + }
15157 + mcc->cq_ppxr.ct = command_type;
15158 + mcc->cq_ppxr.dcpid = cq->parent->dcp_idx;
15159 + qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
15160 + while (!(mcr = qm_mc_result(&p->p)))
15161 + cpu_relax();
15162 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
15163 + QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
15164 +
15165 + PORTAL_IRQ_UNLOCK(p, irqflags);
15166 + put_affine_portal();
15167 +
15168 + res = mcr->result;
15169 + if (res != QM_MCR_RESULT_OK) {
15170 + pr_err("CEETM: CQ PEEK/POP/XSFDR READ failed\n");
15171 + return -EIO;
15172 + }
15173 + *cq_ppxr = mcr->cq_ppxr;
15174 + return 0;
15175 +}
15176 +
15177 +static int qman_ceetm_query_statistics(u16 cid,
15178 + enum qm_dc_portal dcp_idx,
15179 + u16 command_type,
15180 + struct qm_mcr_ceetm_statistics_query *query_result)
15181 +{
15182 + struct qm_mc_command *mcc;
15183 + struct qm_mc_result *mcr;
15184 + struct qman_portal *p;
15185 + unsigned long irqflags __maybe_unused;
15186 + u8 res;
15187 +
15188 + p = get_affine_portal();
15189 + PORTAL_IRQ_LOCK(p, irqflags);
15190 +
15191 + mcc = qm_mc_start(&p->p);
15192 + mcc->stats_query_write.cid = cid;
15193 + mcc->stats_query_write.dcpid = dcp_idx;
15194 + mcc->stats_query_write.ct = command_type;
15195 + qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
15196 +
15197 + while (!(mcr = qm_mc_result(&p->p)))
15198 + cpu_relax();
15199 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
15200 + QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
15201 +
15202 + PORTAL_IRQ_UNLOCK(p, irqflags);
15203 + put_affine_portal();
15204 +
15205 + res = mcr->result;
15206 + if (res != QM_MCR_RESULT_OK) {
15207 + pr_err("CEETM: STATISTICS QUERY failed\n");
15208 + return -EIO;
15209 + }
15210 + *query_result = mcr->stats_query;
15211 + return 0;
15212 +}
15213 +
15214 +int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
15215 + u16 command_type, u64 frame_count,
15216 + u64 byte_count)
15217 +{
15218 + struct qm_mc_command *mcc;
15219 + struct qm_mc_result *mcr;
15220 + struct qman_portal *p;
15221 + unsigned long irqflags __maybe_unused;
15222 + u8 res;
15223 +
15224 + p = get_affine_portal();
15225 + PORTAL_IRQ_LOCK(p, irqflags);
15226 +
15227 + mcc = qm_mc_start(&p->p);
15228 + mcc->stats_query_write.cid = cid;
15229 + mcc->stats_query_write.dcpid = dcp_idx;
15230 + mcc->stats_query_write.ct = command_type;
15231 + mcc->stats_query_write.frm_cnt = frame_count;
15232 + mcc->stats_query_write.byte_cnt = byte_count;
15233 + qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
15234 +
15235 + while (!(mcr = qm_mc_result(&p->p)))
15236 + cpu_relax();
15237 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
15238 + QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
15239 +
15240 + PORTAL_IRQ_UNLOCK(p, irqflags);
15241 + put_affine_portal();
15242 +
15243 + res = mcr->result;
15244 + if (res != QM_MCR_RESULT_OK) {
15245 + pr_err("CEETM: STATISTICS WRITE failed\n");
15246 + return -EIO;
15247 + }
15248 + return 0;
15249 +}
15250 +EXPORT_SYMBOL(qman_ceetm_query_write_statistics);
15251 +
15252 +int qman_ceetm_bps2tokenrate(u64 bps, struct qm_ceetm_rate *token_rate,
15253 + int rounding)
15254 +{
15255 + u16 pres;
15256 + u64 temp;
15257 + u64 qman_freq;
15258 + int ret;
15259 +
15260 + /* Read PRES from CEET_CFG_PRES register */
15261 + ret = qman_ceetm_get_prescaler(&pres);
15262 + if (ret)
15263 + return -EINVAL;
15264 +
15265 + ret = qm_get_clock(&qman_freq);
15266 + if (ret)
15267 + return -EINVAL;
15268 +
15269 + /* token-rate = bytes-per-second * update-reference-period
15270 + *
15271 +	 * Where token-rate is N/8192 for an integer N, and
15272 +	 * update-reference-period is (2^22)/(PRES*QHz), where PRES
15273 +	 * is the prescaler value and QHz is the QMan clock frequency.
15274 +	 * So:
15275 +	 *
15276 +	 * token-rate = (bytes-per-second*2^22)/(PRES*QHz)
15277 +	 *
15278 +	 * Converting to bits-per-second gives:
15279 +	 *
15280 +	 * token-rate = (bps*2^19) / (PRES*QHz)
15281 +	 * N = (bps*2^32) / (PRES*QHz)
15282 +	 *
15283 +	 * And to avoid 64-bit overflow if 'bps' is larger than 4Gbps
15284 +	 * (yet minimise rounding error if 'bps' is small), we reorganise
15285 +	 * the formula to use two 16-bit shifts rather than one 32-bit shift.
15286 + * N = (((bps*2^16)/PRES)*2^16)/QHz
15287 + */
15288 + temp = ROUNDING((bps << 16), pres, rounding);
15289 + temp = ROUNDING((temp << 16), qman_freq, rounding);
15290 + token_rate->whole = temp >> 13;
15291 + token_rate->fraction = temp & (((u64)1 << 13) - 1);
15292 + return 0;
15293 +}
15294 +EXPORT_SYMBOL(qman_ceetm_bps2tokenrate);
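+/*
+ * Worked example (illustrative figures only; the prescaler and clock values
+ * below are assumptions, not read from any particular SoC): with PRES = 1000,
+ * a 600 MHz QMan clock and bps = 1 Gb/s,
+ *
+ *	N = (bps * 2^32) / (PRES * QHz)
+ *	  = (10^9 * 4294967296) / (1000 * 600000000) ~= 7158279
+ *
+ * giving token_rate->whole ~= 873 and token_rate->fraction ~= 6663, i.e.
+ * roughly 873.8 bytes of credit per update reference period.
+ */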
15295 +
15296 +int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate, u64 *bps,
15297 + int rounding)
15298 +{
15299 + u16 pres;
15300 + u64 temp;
15301 + u64 qman_freq;
15302 + int ret;
15303 +
15304 + /* Read PRES from CEET_CFG_PRES register */
15305 + ret = qman_ceetm_get_prescaler(&pres);
15306 + if (ret)
15307 + return -EINVAL;
15308 +
15309 + ret = qm_get_clock(&qman_freq);
15310 + if (ret)
15311 + return -EINVAL;
15312 +
15313 + /* bytes-per-second = token-rate / update-reference-period
15314 + *
15315 + * where "token-rate" is N/8192 for an integer N, and
15316 + * "update-reference-period" is (2^22)/(PRES*QHz), where PRES is
15317 +	 * the prescaler value and QHz is the QMan clock frequency. So:
15318 +	 *
15319 +	 * bytes-per-second = (N/8192) / (4194304/(PRES*QHz))
15320 +	 *		    = N*PRES*QHz / (4194304*8192)
15321 +	 *		    = N*PRES*QHz / (2^35)
15322 +	 *
15323 +	 * Converting to bits-per-second gives:
15324 +	 *
15325 +	 * bps = N*PRES*QHz / (2^32)
15326 +	 *
15327 +	 * Note, the numerator has a maximum width of 72 bits! So to
15328 +	 * avoid 64-bit overflow errors, we calculate PRES*QHz (maximum
15329 +	 * width 48 bits) divided by 2^16 (reducing to a maximum of 32 bits),
15330 +	 * before multiplying by N (going to a maximum of 56 bits).
15331 +	 *
15332 +	 * temp = PRES*QHz / (2^16)
15333 +	 * bps  = temp*N / (2^16)
15334 + */
15335 +	temp = ROUNDING(qman_freq * pres, (u64)1 << 16, rounding);
15336 + temp *= ((token_rate->whole << 13) + token_rate->fraction);
15337 + *bps = ROUNDING(temp, (u64)(1) << 16, rounding);
15338 + return 0;
15339 +}
15340 +EXPORT_SYMBOL(qman_ceetm_tokenrate2bps);
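+/*
+ * Continuing the illustrative example above (PRES = 1000 and a 600 MHz QMan
+ * clock assumed): feeding whole = 873, fraction = 6663 back through
+ *
+ *	bps = N * PRES * QHz / 2^32
+ *	    = 7158279 * 1000 * 600000000 / 4294967296 ~= 10^9
+ *
+ * recovers roughly 1 Gb/s; the small residual error comes from truncation in
+ * the two 16-bit shift stages.
+ */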
15341 +
15342 +int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp, enum qm_dc_portal dcp_idx,
15343 + unsigned int sp_idx)
15344 +{
15345 + struct qm_ceetm_sp *p;
15346 +
15347 + DPA_ASSERT((dcp_idx == qm_dc_portal_fman0) ||
15348 + (dcp_idx == qm_dc_portal_fman1));
15349 +
15350 + if ((sp_idx < qman_ceetms[dcp_idx].sp_range[0]) ||
15351 + (sp_idx >= (qman_ceetms[dcp_idx].sp_range[0] +
15352 + qman_ceetms[dcp_idx].sp_range[1]))) {
15353 + pr_err("Sub-portal index doesn't exist\n");
15354 + return -EINVAL;
15355 + }
15356 +
15357 + list_for_each_entry(p, &qman_ceetms[dcp_idx].sub_portals, node) {
15358 + if ((p->idx == sp_idx) && (p->is_claimed == 0)) {
15359 + p->is_claimed = 1;
15360 + *sp = p;
15361 + return 0;
15362 + }
15363 + }
15364 + pr_err("The sub-portal#%d is not available!\n", sp_idx);
15365 + return -ENODEV;
15366 +}
15367 +EXPORT_SYMBOL(qman_ceetm_sp_claim);
15368 +
15369 +int qman_ceetm_sp_release(struct qm_ceetm_sp *sp)
15370 +{
15371 + struct qm_ceetm_sp *p;
15372 +
15373 + if (sp->lni && sp->lni->is_claimed == 1) {
15374 +		pr_err("The sub-portal's dependencies have not been released!\n");
15375 + return -EBUSY;
15376 + }
15377 +
15378 + list_for_each_entry(p, &qman_ceetms[sp->dcp_idx].sub_portals, node) {
15379 + if (p->idx == sp->idx) {
15380 + p->is_claimed = 0;
15381 + p->lni = NULL;
15382 + }
15383 + }
15384 + /* Disable CEETM mode of this sub-portal */
15385 + qman_sp_disable_ceetm_mode(sp->dcp_idx, sp->idx);
15386 +
15387 + return 0;
15388 +}
15389 +EXPORT_SYMBOL(qman_ceetm_sp_release);
15390 +
15391 +int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni, enum qm_dc_portal dcp_idx,
15392 + unsigned int lni_idx)
15393 +{
15394 + struct qm_ceetm_lni *p;
15395 +
15396 + if ((lni_idx < qman_ceetms[dcp_idx].lni_range[0]) ||
15397 + (lni_idx >= (qman_ceetms[dcp_idx].lni_range[0] +
15398 + qman_ceetms[dcp_idx].lni_range[1]))) {
15399 +		pr_err("The LNI index is out of range\n");
15400 + return -EINVAL;
15401 + }
15402 +
15403 + list_for_each_entry(p, &qman_ceetms[dcp_idx].lnis, node) {
15404 + if ((p->idx == lni_idx) && (p->is_claimed == 0)) {
15405 + *lni = p;
15406 + p->is_claimed = 1;
15407 + return 0;
15408 + }
15409 + }
15410 +
15411 + pr_err("The LNI#%d is not available!\n", lni_idx);
15412 + return -EINVAL;
15413 +}
15414 +EXPORT_SYMBOL(qman_ceetm_lni_claim);
15415 +
15416 +int qman_ceetm_lni_release(struct qm_ceetm_lni *lni)
15417 +{
15418 + struct qm_ceetm_lni *p;
15419 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15420 +
15421 + if (!list_empty(&lni->channels)) {
15422 +		pr_err("The LNI's dependencies have not been released!\n");
15423 + return -EBUSY;
15424 + }
15425 +
15426 + list_for_each_entry(p, &qman_ceetms[lni->dcp_idx].lnis, node) {
15427 + if (p->idx == lni->idx) {
15428 + p->shaper_enable = 0;
15429 + p->shaper_couple = 0;
15430 + p->cr_token_rate.whole = 0;
15431 + p->cr_token_rate.fraction = 0;
15432 + p->er_token_rate.whole = 0;
15433 + p->er_token_rate.fraction = 0;
15434 + p->cr_token_bucket_limit = 0;
15435 + p->er_token_bucket_limit = 0;
15436 + p->is_claimed = 0;
15437 + }
15438 + }
15439 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
15440 + config_opts.dcpid = lni->dcp_idx;
15441 + memset(&config_opts.shaper_config, 0,
15442 + sizeof(config_opts.shaper_config));
15443 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
15444 +}
15445 +EXPORT_SYMBOL(qman_ceetm_lni_release);
15446 +
15447 +int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp, struct qm_ceetm_lni *lni)
15448 +{
15449 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15450 +
15451 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
15452 + config_opts.dcpid = sp->dcp_idx;
15453 + config_opts.sp_mapping.map_lni_id = lni->idx;
15454 + sp->lni = lni;
15455 +
15456 + if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts))
15457 + return -EINVAL;
15458 +
15459 + /* Enable CEETM mode for this sub-portal */
15460 + return qman_sp_enable_ceetm_mode(sp->dcp_idx, sp->idx);
15461 +}
15462 +EXPORT_SYMBOL(qman_ceetm_sp_set_lni);
15463 +
15464 +int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp, unsigned int *lni_idx)
15465 +{
15466 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15467 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15468 +
15469 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
15470 + query_opts.dcpid = sp->dcp_idx;
15471 + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
15472 + pr_err("Can't get SP <-> LNI mapping\n");
15473 + return -EINVAL;
15474 + }
15475 + *lni_idx = query_result.sp_mapping_query.map_lni_id;
15476 + sp->lni->idx = query_result.sp_mapping_query.map_lni_id;
15477 + return 0;
15478 +}
15479 +EXPORT_SYMBOL(qman_ceetm_sp_get_lni);
15480 +
15481 +int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
15482 + int oal)
15483 +{
15484 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15485 +
15486 + if (lni->shaper_enable) {
15487 + pr_err("The shaper has already been enabled\n");
15488 + return -EINVAL;
15489 + }
15490 + lni->shaper_enable = 1;
15491 + lni->shaper_couple = coupled;
15492 + lni->oal = oal;
15493 +
15494 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
15495 + config_opts.dcpid = lni->dcp_idx;
15496 + config_opts.shaper_config.cpl = coupled;
15497 + config_opts.shaper_config.oal = oal;
15498 + config_opts.shaper_config.crtcr = cpu_to_be24((lni->cr_token_rate.whole
15499 + << 13) | lni->cr_token_rate.fraction);
15500 + config_opts.shaper_config.ertcr = cpu_to_be24((lni->er_token_rate.whole
15501 + << 13) | lni->er_token_rate.fraction);
15502 + config_opts.shaper_config.crtbl =
15503 + cpu_to_be16(lni->cr_token_bucket_limit);
15504 + config_opts.shaper_config.ertbl =
15505 + cpu_to_be16(lni->er_token_bucket_limit);
15506 + config_opts.shaper_config.mps = 60;
15507 +
15508 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
15509 +}
15510 +EXPORT_SYMBOL(qman_ceetm_lni_enable_shaper);
15511 +
15512 +int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni)
15513 +{
15514 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15515 +
15516 + if (!lni->shaper_enable) {
15517 +		pr_err("The shaper is already disabled\n");
15518 + return -EINVAL;
15519 + }
15520 +
15521 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
15522 + config_opts.dcpid = lni->dcp_idx;
15523 + config_opts.shaper_config.cpl = lni->shaper_couple;
15524 + config_opts.shaper_config.oal = lni->oal;
15525 + config_opts.shaper_config.crtbl =
15526 + cpu_to_be16(lni->cr_token_bucket_limit);
15527 + config_opts.shaper_config.ertbl =
15528 + cpu_to_be16(lni->er_token_bucket_limit);
15529 +	/* Set the CR/ER rate to all 1's to configure an infinite rate, thus
15530 +	 * disabling the shaping.
15531 + */
15532 + config_opts.shaper_config.crtcr = 0xFFFFFF;
15533 + config_opts.shaper_config.ertcr = 0xFFFFFF;
15534 + config_opts.shaper_config.mps = 60;
15535 + lni->shaper_enable = 0;
15536 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
15537 +}
15538 +EXPORT_SYMBOL(qman_ceetm_lni_disable_shaper);
15539 +
15540 +int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni)
15541 +{
15542 + return lni->shaper_enable;
15543 +}
15544 +EXPORT_SYMBOL(qman_ceetm_lni_is_shaper_enabled);
15545 +
15546 +int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
15547 + const struct qm_ceetm_rate *token_rate,
15548 + u16 token_limit)
15549 +{
15550 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15551 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15552 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15553 + int ret;
15554 +
15555 + lni->cr_token_rate.whole = token_rate->whole;
15556 + lni->cr_token_rate.fraction = token_rate->fraction;
15557 + lni->cr_token_bucket_limit = token_limit;
15558 + if (!lni->shaper_enable)
15559 + return 0;
15560 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
15561 + query_opts.dcpid = lni->dcp_idx;
15562 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
15563 + &query_result);
15564 + if (ret) {
15565 +		pr_err("Failed to get the current LNI shaper setting\n");
15566 + return -EINVAL;
15567 + }
15568 +
15569 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
15570 + config_opts.dcpid = lni->dcp_idx;
15571 + config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole << 13)
15572 + | (token_rate->fraction));
15573 + config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
15574 + config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
15575 + config_opts.shaper_config.oal = query_result.shaper_query.oal;
15576 + config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
15577 + config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
15578 + config_opts.shaper_config.mps = query_result.shaper_query.mps;
15579 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
15580 +}
15581 +EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate);
15582 +
15583 +int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
15584 + u64 bps,
15585 + u16 token_limit)
15586 +{
15587 + struct qm_ceetm_rate token_rate;
15588 + int ret;
15589 +
15590 + ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
15591 + if (ret) {
15592 +		pr_err("Cannot convert bps to token rate\n");
15593 + return -EINVAL;
15594 + }
15595 +
15596 + return qman_ceetm_lni_set_commit_rate(lni, &token_rate, token_limit);
15597 +}
15598 +EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate_bps);
15599 +
15600 +int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
15601 + struct qm_ceetm_rate *token_rate,
15602 + u16 *token_limit)
15603 +{
15604 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15605 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15606 + int ret;
15607 +
15608 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
15609 + query_opts.dcpid = lni->dcp_idx;
15610 +
15611 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
15612 + if (ret) {
15613 + pr_err("The LNI CR rate or limit is not set\n");
15614 + return -EINVAL;
15615 + }
15616 + token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
15617 + token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
15618 + 0x1FFF;
15619 + *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
15620 + return 0;
15621 +}
15622 +EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate);
15623 +
15624 +int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
15625 + u64 *bps, u16 *token_limit)
15626 +{
15627 + struct qm_ceetm_rate token_rate;
15628 + int ret;
15629 +
15630 + ret = qman_ceetm_lni_get_commit_rate(lni, &token_rate, token_limit);
15631 + if (ret) {
15632 + pr_err("The LNI CR rate or limit is not available\n");
15633 + return -EINVAL;
15634 + }
15635 +
15636 + return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
15637 +}
15638 +EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate_bps);
15639 +
15640 +int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
15641 + const struct qm_ceetm_rate *token_rate,
15642 + u16 token_limit)
15643 +{
15644 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15645 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15646 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15647 + int ret;
15648 +
15649 + lni->er_token_rate.whole = token_rate->whole;
15650 + lni->er_token_rate.fraction = token_rate->fraction;
15651 + lni->er_token_bucket_limit = token_limit;
15652 + if (!lni->shaper_enable)
15653 + return 0;
15654 +
15655 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
15656 + query_opts.dcpid = lni->dcp_idx;
15657 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
15658 + &query_result);
15659 + if (ret) {
15660 +		pr_err("Failed to get the current LNI shaper setting\n");
15661 + return -EINVAL;
15662 + }
15663 +
15664 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
15665 + config_opts.dcpid = lni->dcp_idx;
15666 + config_opts.shaper_config.ertcr = cpu_to_be24(
15667 + (token_rate->whole << 13) | (token_rate->fraction));
15668 + config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
15669 + config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
15670 + config_opts.shaper_config.oal = query_result.shaper_query.oal;
15671 + config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
15672 + config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
15673 + config_opts.shaper_config.mps = query_result.shaper_query.mps;
15674 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
15675 +}
15676 +EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate);
15677 +
15678 +int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
15679 + u64 bps,
15680 + u16 token_limit)
15681 +{
15682 + struct qm_ceetm_rate token_rate;
15683 + int ret;
15684 +
15685 + ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
15686 + if (ret) {
15687 +		pr_err("Cannot convert bps to token rate\n");
15688 + return -EINVAL;
15689 + }
15690 + return qman_ceetm_lni_set_excess_rate(lni, &token_rate, token_limit);
15691 +}
15692 +EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate_bps);
15693 +
15694 +int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
15695 + struct qm_ceetm_rate *token_rate,
15696 + u16 *token_limit)
15697 +{
15698 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15699 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15700 + int ret;
15701 +
15702 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
15703 + query_opts.dcpid = lni->dcp_idx;
15704 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
15705 + if (ret) {
15706 + pr_err("The LNI ER rate or limit is not set\n");
15707 + return -EINVAL;
15708 + }
15709 + token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
15710 + token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
15711 + 0x1FFF;
15712 + *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
15713 + return 0;
15714 +}
15715 +EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate);
15716 +
15717 +int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
15718 + u64 *bps, u16 *token_limit)
15719 +{
15720 + struct qm_ceetm_rate token_rate;
15721 + int ret;
15722 +
15723 + ret = qman_ceetm_lni_get_excess_rate(lni, &token_rate, token_limit);
15724 + if (ret) {
15725 + pr_err("The LNI ER rate or limit is not available\n");
15726 + return -EINVAL;
15727 + }
15728 +
15729 + return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
15730 +}
15731 +EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate_bps);
15732 +
15733 +#define QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(n) ((15 - n) * 4)
15734 +#define QMAN_CEETM_LNITCFCC_ENABLE 0x8
15735 +int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
15736 + unsigned int cq_level,
15737 + int traffic_class)
15738 +{
15739 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15740 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15741 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15742 + u64 lnitcfcc;
15743 +
15744 +	if ((cq_level > 15) || (traffic_class > 7)) {
15745 + pr_err("The CQ or traffic class id is out of range\n");
15746 + return -EINVAL;
15747 + }
15748 +
15749 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
15750 + query_opts.dcpid = lni->dcp_idx;
15751 + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
15752 +		pr_err("Failed to query tcfcc\n");
15753 + return -EINVAL;
15754 + }
15755 +
15756 + lnitcfcc = be64_to_cpu(query_result.tcfc_query.lnitcfcc);
15757 + if (traffic_class == -1) {
15758 + /* disable tcfc for this CQ */
15759 + lnitcfcc &= ~((u64)QMAN_CEETM_LNITCFCC_ENABLE <<
15760 + QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
15761 + } else {
15762 + lnitcfcc &= ~((u64)0xF <<
15763 + QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
15764 + lnitcfcc |= ((u64)(QMAN_CEETM_LNITCFCC_ENABLE |
15765 + traffic_class)) <<
15766 + QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level);
15767 + }
15768 + config_opts.tcfc_config.lnitcfcc = cpu_to_be64(lnitcfcc);
15769 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
15770 + config_opts.dcpid = lni->dcp_idx;
15771 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
15772 +}
15773 +EXPORT_SYMBOL(qman_ceetm_lni_set_tcfcc);
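+/*
+ * Example of the lnitcfcc encoding manipulated above: each CQ level owns a
+ * 4-bit nibble, with CQ level 0 in the most significant nibble. For
+ * cq_level = 2 and traffic_class = 3 the shift is (15 - 2) * 4 = 52, so the
+ * nibble written is (QMAN_CEETM_LNITCFCC_ENABLE | 3) = 0xB at bits 55:52.
+ */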
15774 +
15775 +#define QMAN_CEETM_LNITCFCC_TC_MASK 0x7
15776 +int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni, unsigned int cq_level,
15777 + int *traffic_class)
15778 +{
15779 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15780 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15781 + int ret;
15782 + u8 lnitcfcc;
15783 +
15784 + if (cq_level > 15) {
15785 +		pr_err("The CQ level is out of range\n");
15786 + return -EINVAL;
15787 + }
15788 +
15789 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
15790 + query_opts.dcpid = lni->dcp_idx;
15791 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
15792 + if (ret)
15793 + return ret;
15794 +	lnitcfcc = (u8)(be64_to_cpu(query_result.tcfc_query.lnitcfcc) >>
15795 + QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
15796 + if (lnitcfcc & QMAN_CEETM_LNITCFCC_ENABLE)
15797 + *traffic_class = lnitcfcc & QMAN_CEETM_LNITCFCC_TC_MASK;
15798 + else
15799 + *traffic_class = -1;
15800 + return 0;
15801 +}
15802 +EXPORT_SYMBOL(qman_ceetm_lni_get_tcfcc);
15803 +
15804 +int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
15805 + struct qm_ceetm_lni *lni)
15806 +{
15807 + struct qm_ceetm_channel *p;
15808 + u32 channel_idx;
15809 + int ret = 0;
15810 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15811 +
15812 + if (lni->dcp_idx == qm_dc_portal_fman0) {
15813 + ret = qman_alloc_ceetm0_channel(&channel_idx);
15814 + } else if (lni->dcp_idx == qm_dc_portal_fman1) {
15815 + ret = qman_alloc_ceetm1_channel(&channel_idx);
15816 + } else {
15817 + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
15818 + lni->dcp_idx);
15819 + return -EINVAL;
15820 + }
15821 +
15822 + if (ret) {
15823 +		pr_err("There is no channel available for LNI#%d\n", lni->idx);
15824 + return -ENODEV;
15825 + }
15826 +
15827 + p = kzalloc(sizeof(*p), GFP_KERNEL);
15828 + if (!p)
15829 + return -ENOMEM;
15830 + p->idx = channel_idx;
15831 + p->dcp_idx = lni->dcp_idx;
15832 + p->lni_idx = lni->idx;
15833 + list_add_tail(&p->node, &lni->channels);
15834 + INIT_LIST_HEAD(&p->class_queues);
15835 + INIT_LIST_HEAD(&p->ccgs);
15836 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
15837 + channel_idx);
15838 + config_opts.dcpid = lni->dcp_idx;
15839 + config_opts.channel_mapping.map_lni_id = lni->idx;
15840 + config_opts.channel_mapping.map_shaped = 0;
15841 +	if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
15842 +		pr_err("Can't map channel#%d for LNI#%d\n",
15843 +			channel_idx, lni->idx);
+		list_del(&p->node);
+		kfree(p);
15844 +		return -EINVAL;
15845 +	}
15846 + *channel = p;
15847 + return 0;
15848 +}
15849 +EXPORT_SYMBOL(qman_ceetm_channel_claim);
15850 +
15851 +int qman_ceetm_channel_release(struct qm_ceetm_channel *channel)
15852 +{
15853 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15854 + if (!list_empty(&channel->class_queues)) {
15855 + pr_err("CEETM channel#%d has class queue unreleased!\n",
15856 + channel->idx);
15857 + return -EBUSY;
15858 + }
15859 + if (!list_empty(&channel->ccgs)) {
15860 + pr_err("CEETM channel#%d has ccg unreleased!\n",
15861 + channel->idx);
15862 + return -EBUSY;
15863 + }
15864 +
15865 + /* channel->dcp_idx corresponds to known fman validation */
15866 + if ((channel->dcp_idx != qm_dc_portal_fman0) &&
15867 + (channel->dcp_idx != qm_dc_portal_fman1)) {
15868 + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
15869 + channel->dcp_idx);
15870 + return -EINVAL;
15871 + }
15872 +
15873 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
15874 + channel->idx);
15875 + config_opts.dcpid = channel->dcp_idx;
15876 + memset(&config_opts.shaper_config, 0,
15877 + sizeof(config_opts.shaper_config));
15878 + if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
15879 +		pr_err("Can't reset channel shaping parameters\n");
15880 + return -EINVAL;
15881 + }
15882 +
15883 + if (channel->dcp_idx == qm_dc_portal_fman0) {
15884 + qman_release_ceetm0_channelid(channel->idx);
15885 + } else if (channel->dcp_idx == qm_dc_portal_fman1) {
15886 + qman_release_ceetm1_channelid(channel->idx);
15887 + } else {
15888 + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
15889 + channel->dcp_idx);
15890 + return -EINVAL;
15891 + }
15892 + list_del(&channel->node);
15893 + kfree(channel);
15894 +
15895 + return 0;
15896 +}
15897 +EXPORT_SYMBOL(qman_ceetm_channel_release);
15898 +
15899 +int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
15900 + int coupled)
15901 +{
15902 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15903 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15904 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15905 +
15906 + if (channel->shaper_enable == 1) {
15907 +		pr_err("This channel's shaper has already been enabled!\n");
15908 + return -EINVAL;
15909 + }
15910 +
15911 + channel->shaper_enable = 1;
15912 + channel->shaper_couple = coupled;
15913 +
15914 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
15915 + channel->idx);
15916 + query_opts.dcpid = channel->dcp_idx;
15917 +
15918 + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
15919 + pr_err("Can't query channel mapping\n");
15920 + return -EINVAL;
15921 + }
15922 +
15923 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
15924 + channel->idx);
15925 + config_opts.dcpid = channel->dcp_idx;
15926 + config_opts.channel_mapping.map_lni_id =
15927 + query_result.channel_mapping_query.map_lni_id;
15928 + config_opts.channel_mapping.map_shaped = 1;
15929 + if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
15930 + pr_err("Can't enable shaper for channel #%d\n", channel->idx);
15931 + return -EINVAL;
15932 + }
15933 +
15934 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
15935 + channel->idx);
15936 + config_opts.shaper_config.cpl = coupled;
15937 + config_opts.shaper_config.crtcr =
15938 + cpu_to_be24((channel->cr_token_rate.whole
15939 + << 13) |
15940 + channel->cr_token_rate.fraction);
15941 + config_opts.shaper_config.ertcr =
15942 + cpu_to_be24(channel->er_token_rate.whole
15943 + << 13 |
15944 + channel->er_token_rate.fraction);
15945 + config_opts.shaper_config.crtbl =
15946 + cpu_to_be16(channel->cr_token_bucket_limit);
15947 + config_opts.shaper_config.ertbl =
15948 + cpu_to_be16(channel->er_token_bucket_limit);
15949 +
15950 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
15951 +}
15952 +EXPORT_SYMBOL(qman_ceetm_channel_enable_shaper);
15953 +
15954 +int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel)
15955 +{
15956 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15957 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15958 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
15959 +
15960 +
15961 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
15962 + channel->idx);
15963 + query_opts.dcpid = channel->dcp_idx;
15964 +
15965 + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
15966 + pr_err("Can't query channel mapping\n");
15967 + return -EINVAL;
15968 + }
15969 +
15970 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
15971 + channel->idx);
15972 + config_opts.dcpid = channel->dcp_idx;
15973 + config_opts.channel_mapping.map_shaped = 0;
15974 + config_opts.channel_mapping.map_lni_id =
15975 + query_result.channel_mapping_query.map_lni_id;
15976 +
15977 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
15978 +}
15979 +EXPORT_SYMBOL(qman_ceetm_channel_disable_shaper);
15980 +
15981 +int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel)
15982 +{
15983 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
15984 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
15985 +
15986 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
15987 + channel->idx);
15988 + query_opts.dcpid = channel->dcp_idx;
15989 +
15990 + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
15991 + pr_err("Can't query channel mapping\n");
15992 + return -EINVAL;
15993 + }
15994 +
15995 + return query_result.channel_mapping_query.map_shaped;
15996 +}
15997 +EXPORT_SYMBOL(qman_ceetm_channel_is_shaper_enabled);
15998 +
15999 +int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
16000 + const struct qm_ceetm_rate *token_rate,
16001 + u16 token_limit)
16002 +{
16003 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
16004 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
16005 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
16006 + int ret;
16007 +
16008 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
16009 + channel->idx);
16010 + query_opts.dcpid = channel->dcp_idx;
16011 +
16012 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
16013 + if (ret) {
16014 +		pr_err("Failed to get the current channel shaper setting\n");
16015 + return -EINVAL;
16016 + }
16017 +
16018 + channel->cr_token_rate.whole = token_rate->whole;
16019 + channel->cr_token_rate.fraction = token_rate->fraction;
16020 + channel->cr_token_bucket_limit = token_limit;
16021 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
16022 + channel->idx);
16023 + config_opts.dcpid = channel->dcp_idx;
16024 + config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole
16025 + << 13) | (token_rate->fraction));
16026 + config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
16027 + config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
16028 + config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
16029 + config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
16030 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
16031 +}
16032 +EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate);
16033 +
16034 +int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
16035 + u64 bps, u16 token_limit)
16036 +{
16037 + struct qm_ceetm_rate token_rate;
16038 + int ret;
16039 +
16040 + ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
16041 + if (ret) {
16042 +		pr_err("Cannot convert bps to token rate\n");
16043 + return -EINVAL;
16044 + }
16045 + return qman_ceetm_channel_set_commit_rate(channel, &token_rate,
16046 + token_limit);
16047 +}
16048 +EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate_bps);
16049 +
16050 +int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
16051 + struct qm_ceetm_rate *token_rate,
16052 + u16 *token_limit)
16053 +{
16054 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
16055 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
16056 + int ret;
16057 +
16058 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
16059 + channel->idx);
16060 + query_opts.dcpid = channel->dcp_idx;
16061 +
16062 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
16063 +	if (ret || !query_result.shaper_query.crtcr ||
16064 + !query_result.shaper_query.crtbl) {
16065 + pr_err("The channel commit rate or limit is not set\n");
16066 + return -EINVAL;
16067 + }
16068 + token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
16069 + token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
16070 + 0x1FFF;
16071 + *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
16072 + return 0;
16073 +}
16074 +EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate);
16075 +
16076 +int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
16077 + u64 *bps, u16 *token_limit)
16078 +{
16079 + struct qm_ceetm_rate token_rate;
16080 + int ret;
16081 +
16082 + ret = qman_ceetm_channel_get_commit_rate(channel, &token_rate,
16083 + token_limit);
16084 + if (ret) {
16085 + pr_err("The channel CR rate or limit is not available\n");
16086 + return -EINVAL;
16087 + }
16088 +
16089 + return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
16090 +}
16091 +EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate_bps);
16092 +
16093 +int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
16094 + const struct qm_ceetm_rate *token_rate,
16095 + u16 token_limit)
16096 +{
16097 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
16098 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
16099 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
16100 + int ret;
16101 +
16102 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
16103 + channel->idx);
16104 + query_opts.dcpid = channel->dcp_idx;
16105 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
16106 + if (ret) {
16107 +		pr_err("Failed to get the current channel shaper setting\n");
16108 + return -EINVAL;
16109 + }
16110 +
16111 + channel->er_token_rate.whole = token_rate->whole;
16112 + channel->er_token_rate.fraction = token_rate->fraction;
16113 + channel->er_token_bucket_limit = token_limit;
16114 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
16115 + channel->idx);
16116 + config_opts.dcpid = channel->dcp_idx;
16117 + config_opts.shaper_config.ertcr = cpu_to_be24(
16118 + (token_rate->whole << 13) | (token_rate->fraction));
16119 + config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
16120 + config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
16121 + config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
16122 + config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
16123 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
16124 +}
16125 +EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate);
16126 +
16127 +int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
16128 + u64 bps, u16 token_limit)
16129 +{
16130 + struct qm_ceetm_rate token_rate;
16131 + int ret;
16132 +
16133 + ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
16134 + if (ret) {
16135 +		pr_err("Cannot convert bps to token rate\n");
16136 + return -EINVAL;
16137 + }
16138 + return qman_ceetm_channel_set_excess_rate(channel, &token_rate,
16139 + token_limit);
16140 +}
16141 +EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate_bps);
16142 +
16143 +int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
16144 + struct qm_ceetm_rate *token_rate,
16145 + u16 *token_limit)
16146 +{
16147 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
16148 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
16149 + int ret;
16150 +
16151 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
16152 + channel->idx);
16153 + query_opts.dcpid = channel->dcp_idx;
16154 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
16155 +	if (ret || !query_result.shaper_query.ertcr ||
16156 + !query_result.shaper_query.ertbl) {
16157 + pr_err("The channel excess rate or limit is not set\n");
16158 + return -EINVAL;
16159 + }
16160 + token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
16161 + token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
16162 + 0x1FFF;
16163 + *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
16164 + return 0;
16165 +}
16166 +EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate);
16167 +
16168 +int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
16169 + u64 *bps, u16 *token_limit)
16170 +{
16171 + struct qm_ceetm_rate token_rate;
16172 + int ret;
16173 +
16174 + ret = qman_ceetm_channel_get_excess_rate(channel, &token_rate,
16175 + token_limit);
16176 + if (ret) {
16177 + pr_err("The channel ER rate or limit is not available\n");
16178 + return -EINVAL;
16179 + }
16180 +
16181 + return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
16182 +}
16183 +EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate_bps);
16184 +
16185 +int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
16186 + u16 token_limit)
16187 +{
16188 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
16189 +
16190 + if (channel->shaper_enable) {
16191 + pr_err("This channel is a shaped one\n");
16192 + return -EINVAL;
16193 + }
16194 +
16195 + channel->cr_token_bucket_limit = token_limit;
16196 + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
16197 + channel->idx);
16198 + config_opts.dcpid = channel->dcp_idx;
16199 + config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
16200 + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
16201 +}
16202 +EXPORT_SYMBOL(qman_ceetm_channel_set_weight);
16203 +
16204 +int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
16205 + u16 *token_limit)
16206 +{
16207 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
16208 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
16209 + int ret;
16210 +
16211 + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
16212 + channel->idx);
16213 + query_opts.dcpid = channel->dcp_idx;
16214 + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
16215 +	if (ret || !query_result.shaper_query.crtbl) {
16216 +		pr_err("This unshaped channel's uFQ weight is unavailable\n");
16217 + return -EINVAL;
16218 + }
16219 + *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
16220 + return 0;
16221 +}
16222 +EXPORT_SYMBOL(qman_ceetm_channel_get_weight);
16223 +
16224 +int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, int group_b,
16225 + unsigned int prio_a, unsigned int prio_b)
16226 +{
16227 + struct qm_mcc_ceetm_class_scheduler_config config_opts;
16228 + struct qm_mcr_ceetm_class_scheduler_query query_result;
16229 + int i;
16230 +
16231 + if (prio_a > 7) {
16232 + pr_err("The priority of group A is out of range\n");
16233 + return -EINVAL;
16234 + }
16235 + if (group_b && (prio_b > 7)) {
16236 + pr_err("The priority of group B is out of range\n");
16237 + return -EINVAL;
16238 + }
16239 +
16240 + if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
16241 + pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
16242 + return -EINVAL;
16243 + }
16244 +
16245 + config_opts.cqcid = cpu_to_be16(channel->idx);
16246 + config_opts.dcpid = channel->dcp_idx;
16247 + config_opts.gpc_combine_flag = !group_b;
16248 + config_opts.gpc_prio_a = prio_a;
16249 + config_opts.gpc_prio_b = prio_b;
16250 +
16251 + for (i = 0; i < 8; i++)
16252 + config_opts.w[i] = query_result.w[i];
16253 + config_opts.crem = query_result.crem;
16254 + config_opts.erem = query_result.erem;
16255 +
16256 + return qman_ceetm_configure_class_scheduler(&config_opts);
16257 +}
16258 +EXPORT_SYMBOL(qman_ceetm_channel_set_group);
16259 +
16260 +int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, int *group_b,
16261 + unsigned int *prio_a, unsigned int *prio_b)
16262 +{
16263 + struct qm_mcr_ceetm_class_scheduler_query query_result;
16264 +
16265 + if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
16266 + pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
16267 + return -EINVAL;
16268 + }
16269 + *group_b = !query_result.gpc_combine_flag;
16270 + *prio_a = query_result.gpc_prio_a;
16271 + *prio_b = query_result.gpc_prio_b;
16272 +
16273 + return 0;
16274 +}
16275 +EXPORT_SYMBOL(qman_ceetm_channel_get_group);
16276 +
16277 +#define GROUP_A_ELIGIBILITY_SET (1 << 8)
16278 +#define GROUP_B_ELIGIBILITY_SET (1 << 9)
16279 +#define CQ_ELIGIBILITY_SET(n)	(1 << (7 - (n)))
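+/* Illustrative layout of the CR/ER eligibility mask manipulated below: bit
+ * (7 - n) enables class queue n (n = 0..7), bit 8 enables group A and bit 9
+ * enables group B. For example, a crem value of 0x124 marks CQ#2 (0x20),
+ * CQ#5 (0x04) and group A (0x100) as CR-eligible.
+ */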
16280 +int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
16281 + *channel, int group_b, int cre)
16282 +{
16283 + struct qm_mcc_ceetm_class_scheduler_config csch_config;
16284 + struct qm_mcr_ceetm_class_scheduler_query csch_query;
16285 + int i;
16286 +
16287 + if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
16288 + pr_err("Cannot get the channel %d scheduler setting.\n",
16289 + channel->idx);
16290 + return -EINVAL;
16291 + }
16292 + csch_config.cqcid = cpu_to_be16(channel->idx);
16293 + csch_config.dcpid = channel->dcp_idx;
16294 + csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
16295 + csch_config.gpc_prio_a = csch_query.gpc_prio_a;
16296 + csch_config.gpc_prio_b = csch_query.gpc_prio_b;
16297 +
16298 + for (i = 0; i < 8; i++)
16299 + csch_config.w[i] = csch_query.w[i];
16300 + csch_config.erem = csch_query.erem;
16301 + if (group_b)
16302 + csch_config.crem = (be16_to_cpu(csch_query.crem)
16303 + & ~GROUP_B_ELIGIBILITY_SET)
16304 + | (cre ? GROUP_B_ELIGIBILITY_SET : 0);
16305 + else
16306 + csch_config.crem = (be16_to_cpu(csch_query.crem)
16307 + & ~GROUP_A_ELIGIBILITY_SET)
16308 + | (cre ? GROUP_A_ELIGIBILITY_SET : 0);
16309 +
16310 + csch_config.crem = cpu_to_be16(csch_config.crem);
16311 +
16312 + if (qman_ceetm_configure_class_scheduler(&csch_config)) {
16313 + pr_err("Cannot config channel %d's scheduler with "
16314 + "group_%c's cr eligibility\n", channel->idx,
16315 + group_b ? 'b' : 'a');
16316 + return -EINVAL;
16317 + }
16318 +
16319 + return 0;
16320 +}
16321 +EXPORT_SYMBOL(qman_ceetm_channel_set_group_cr_eligibility);
16322 +
16323 +int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
16324 + *channel, int group_b, int ere)
16325 +{
16326 + struct qm_mcc_ceetm_class_scheduler_config csch_config;
16327 + struct qm_mcr_ceetm_class_scheduler_query csch_query;
16328 + int i;
16329 +
16330 + if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
16331 + pr_err("Cannot get the channel %d scheduler setting.\n",
16332 + channel->idx);
16333 + return -EINVAL;
16334 + }
16335 + csch_config.cqcid = cpu_to_be16(channel->idx);
16336 + csch_config.dcpid = channel->dcp_idx;
16337 + csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
16338 + csch_config.gpc_prio_a = csch_query.gpc_prio_a;
16339 + csch_config.gpc_prio_b = csch_query.gpc_prio_b;
16340 +
16341 + for (i = 0; i < 8; i++)
16342 + csch_config.w[i] = csch_query.w[i];
16343 + csch_config.crem = csch_query.crem;
16344 + if (group_b)
16345 + csch_config.erem = (be16_to_cpu(csch_query.erem)
16346 + & ~GROUP_B_ELIGIBILITY_SET)
16347 + | (ere ? GROUP_B_ELIGIBILITY_SET : 0);
16348 + else
16349 + csch_config.erem = (be16_to_cpu(csch_query.erem)
16350 + & ~GROUP_A_ELIGIBILITY_SET)
16351 + | (ere ? GROUP_A_ELIGIBILITY_SET : 0);
16352 +
16353 + csch_config.erem = cpu_to_be16(csch_config.erem);
16354 +
16355 + if (qman_ceetm_configure_class_scheduler(&csch_config)) {
16356 + pr_err("Cannot config channel %d's scheduler with "
16357 + "group_%c's er eligibility\n", channel->idx,
16358 + group_b ? 'b' : 'a');
16359 + return -EINVAL;
16360 + }
16361 +
16362 + return 0;
16363 +}
16364 +EXPORT_SYMBOL(qman_ceetm_channel_set_group_er_eligibility);
16365 +
16366 +int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
16367 + unsigned int idx, int cre)
16368 +{
16369 + struct qm_mcc_ceetm_class_scheduler_config csch_config;
16370 + struct qm_mcr_ceetm_class_scheduler_query csch_query;
16371 + int i;
16372 +
16373 + if (idx > 7) {
16374 + pr_err("CQ index is out of range\n");
16375 + return -EINVAL;
16376 + }
16377 + if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
16378 + pr_err("Cannot get the channel %d scheduler setting.\n",
16379 + channel->idx);
16380 + return -EINVAL;
16381 + }
16382 + csch_config.cqcid = cpu_to_be16(channel->idx);
16383 + csch_config.dcpid = channel->dcp_idx;
16384 + csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
16385 + csch_config.gpc_prio_a = csch_query.gpc_prio_a;
16386 + csch_config.gpc_prio_b = csch_query.gpc_prio_b;
16387 + for (i = 0; i < 8; i++)
16388 + csch_config.w[i] = csch_query.w[i];
16389 + csch_config.erem = csch_query.erem;
16390 + csch_config.crem = (be16_to_cpu(csch_query.crem)
16391 + & ~CQ_ELIGIBILITY_SET(idx)) |
16392 + (cre ? CQ_ELIGIBILITY_SET(idx) : 0);
16393 + csch_config.crem = cpu_to_be16(csch_config.crem);
16394 + if (qman_ceetm_configure_class_scheduler(&csch_config)) {
16395 + pr_err("Cannot config channel scheduler to set "
16396 + "cr eligibility mask for CQ#%d\n", idx);
16397 + return -EINVAL;
16398 + }
16399 +
16400 + return 0;
16401 +}
16402 +EXPORT_SYMBOL(qman_ceetm_channel_set_cq_cr_eligibility);
16403 +
16404 +int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
16405 + unsigned int idx, int ere)
16406 +{
16407 + struct qm_mcc_ceetm_class_scheduler_config csch_config;
16408 + struct qm_mcr_ceetm_class_scheduler_query csch_query;
16409 + int i;
16410 +
16411 + if (idx > 7) {
16412 + pr_err("CQ index is out of range\n");
16413 + return -EINVAL;
16414 + }
16415 + if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
16416 + pr_err("Cannot get the channel %d scheduler setting.\n",
16417 + channel->idx);
16418 + return -EINVAL;
16419 + }
16420 + csch_config.cqcid = cpu_to_be16(channel->idx);
16421 + csch_config.dcpid = channel->dcp_idx;
16422 + csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
16423 + csch_config.gpc_prio_a = csch_query.gpc_prio_a;
16424 + csch_config.gpc_prio_b = csch_query.gpc_prio_b;
16425 + for (i = 0; i < 8; i++)
16426 + csch_config.w[i] = csch_query.w[i];
16427 + csch_config.crem = csch_query.crem;
16428 + csch_config.erem = (be16_to_cpu(csch_query.erem)
16429 + & ~CQ_ELIGIBILITY_SET(idx)) |
16430 + (ere ? CQ_ELIGIBILITY_SET(idx) : 0);
16431 + csch_config.erem = cpu_to_be16(csch_config.erem);
16432 + if (qman_ceetm_configure_class_scheduler(&csch_config)) {
16433 + pr_err("Cannot config channel scheduler to set "
16434 + "er eligibility mask for CQ#%d\n", idx);
16435 + return -EINVAL;
16436 + }
16437 + return 0;
16438 +}
16439 +EXPORT_SYMBOL(qman_ceetm_channel_set_cq_er_eligibility);
16440 +
16441 +int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
16442 + struct qm_ceetm_channel *channel, unsigned int idx,
16443 + struct qm_ceetm_ccg *ccg)
16444 +{
16445 + struct qm_ceetm_cq *p;
16446 + struct qm_mcc_ceetm_cq_config cq_config;
16447 +
16448 + if (idx > 7) {
16449 + pr_err("The independent class queue id is out of range\n");
16450 + return -EINVAL;
16451 + }
16452 +
16453 + list_for_each_entry(p, &channel->class_queues, node) {
16454 + if (p->idx == idx) {
16455 + pr_err("The CQ#%d has been claimed!\n", idx);
16456 + return -EINVAL;
16457 + }
16458 + }
16459 +
16460 + p = kmalloc(sizeof(*p), GFP_KERNEL);
16461 + if (!p) {
16462 + pr_err("Can't allocate memory for CQ#%d!\n", idx);
16463 + return -ENOMEM;
16464 + }
16465 +
16466 + list_add_tail(&p->node, &channel->class_queues);
16467 + p->idx = idx;
16468 + p->is_claimed = 1;
16469 + p->parent = channel;
16470 + INIT_LIST_HEAD(&p->bound_lfqids);
16471 +
16472 + if (ccg) {
16473 + cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
16474 + cq_config.dcpid = channel->dcp_idx;
16475 + cq_config.ccgid = cpu_to_be16(ccg->idx);
16476 + if (qman_ceetm_configure_cq(&cq_config)) {
16477 + pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
16478 + idx, ccg->idx);
16479 + list_del(&p->node);
16480 + kfree(p);
16481 + return -EINVAL;
16482 + }
16483 + }
16484 +
16485 + *cq = p;
16486 + return 0;
16487 +}
16488 +EXPORT_SYMBOL(qman_ceetm_cq_claim);
16489 +
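+/* Worked example of the CQID encoding used by qman_ceetm_cq_claim() and its
+ * variants (illustrative values): the command's cqid field is the channel
+ * index shifted left by 4, OR'ed with the class queue index, so
+ * channel->idx = 0x21 and idx = 5 yield cpu_to_be16(0x215).
+ */
+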
16490 +int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
16491 + struct qm_ceetm_channel *channel, unsigned int idx,
16492 + struct qm_ceetm_ccg *ccg)
16493 +{
16494 + struct qm_ceetm_cq *p;
16495 + struct qm_mcc_ceetm_cq_config cq_config;
16496 +
16497 + if ((idx < 8) || (idx > 15)) {
16498 + pr_err("This grouped class queue id is out of range\n");
16499 + return -EINVAL;
16500 + }
16501 +
16502 + list_for_each_entry(p, &channel->class_queues, node) {
16503 + if (p->idx == idx) {
16504 + pr_err("The CQ#%d has been claimed!\n", idx);
16505 + return -EINVAL;
16506 + }
16507 + }
16508 +
16509 + p = kmalloc(sizeof(*p), GFP_KERNEL);
16510 + if (!p) {
16511 + pr_err("Can't allocate memory for CQ#%d!\n", idx);
16512 + return -ENOMEM;
16513 + }
16514 +
16515 + list_add_tail(&p->node, &channel->class_queues);
16516 + p->idx = idx;
16517 + p->is_claimed = 1;
16518 + p->parent = channel;
16519 + INIT_LIST_HEAD(&p->bound_lfqids);
16520 +
16521 + if (ccg) {
16522 + cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
16523 + cq_config.dcpid = channel->dcp_idx;
16524 + cq_config.ccgid = cpu_to_be16(ccg->idx);
16525 + if (qman_ceetm_configure_cq(&cq_config)) {
16526 + pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
16527 + idx, ccg->idx);
16528 + list_del(&p->node);
16529 + kfree(p);
16530 + return -EINVAL;
16531 + }
16532 + }
16533 + *cq = p;
16534 + return 0;
16535 +}
16536 +EXPORT_SYMBOL(qman_ceetm_cq_claim_A);
16537 +
16538 +int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
16539 + struct qm_ceetm_channel *channel, unsigned int idx,
16540 + struct qm_ceetm_ccg *ccg)
16541 +{
16542 + struct qm_ceetm_cq *p;
16543 + struct qm_mcc_ceetm_cq_config cq_config;
16544 +
16545 + if ((idx < 12) || (idx > 15)) {
16546 + pr_err("This grouped class queue id is out of range\n");
16547 + return -EINVAL;
16548 + }
16549 +
16550 + list_for_each_entry(p, &channel->class_queues, node) {
16551 + if (p->idx == idx) {
16552 + pr_err("The CQ#%d has been claimed!\n", idx);
16553 + return -EINVAL;
16554 + }
16555 + }
16556 +
16557 + p = kmalloc(sizeof(*p), GFP_KERNEL);
16558 + if (!p) {
16559 + pr_err("Can't allocate memory for CQ#%d!\n", idx);
16560 + return -ENOMEM;
16561 + }
16562 +
16563 + list_add_tail(&p->node, &channel->class_queues);
16564 + p->idx = idx;
16565 + p->is_claimed = 1;
16566 + p->parent = channel;
16567 + INIT_LIST_HEAD(&p->bound_lfqids);
16568 +
16569 + if (ccg) {
16570 + cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
16571 + cq_config.dcpid = channel->dcp_idx;
16572 + cq_config.ccgid = cpu_to_be16(ccg->idx);
16573 + if (qman_ceetm_configure_cq(&cq_config)) {
16574 + pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
16575 + idx, ccg->idx);
16576 + list_del(&p->node);
16577 + kfree(p);
16578 + return -EINVAL;
16579 + }
16580 + }
16581 + *cq = p;
16582 + return 0;
16583 +}
16584 +EXPORT_SYMBOL(qman_ceetm_cq_claim_B);
16585 +
16586 +int qman_ceetm_cq_release(struct qm_ceetm_cq *cq)
16587 +{
16588 + if (!list_empty(&cq->bound_lfqids)) {
16589 + pr_err("The CQ#%d has unreleased LFQID\n", cq->idx);
16590 + return -EBUSY;
16591 + }
16592 + list_del(&cq->node);
16593 + qman_ceetm_drain_cq(cq);
16594 + kfree(cq);
16595 + return 0;
16596 +}
16597 +EXPORT_SYMBOL(qman_ceetm_cq_release);
16598 +
16599 +int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
16600 + struct qm_ceetm_weight_code *weight_code)
16601 +{
16602 + struct qm_mcc_ceetm_class_scheduler_config config_opts;
16603 + struct qm_mcr_ceetm_class_scheduler_query query_result;
16604 + int i;
16605 +
16606 + if (cq->idx < 8) {
16607 + pr_err("Can not set weight for ungrouped class queue\n");
16608 + return -EINVAL;
16609 + }
16610 +
16611 + if (qman_ceetm_query_class_scheduler(cq->parent, &query_result)) {
16612 + pr_err("Can't query channel#%d's scheduler!\n",
16613 + cq->parent->idx);
16614 + return -EINVAL;
16615 + }
16616 +
16617 + config_opts.cqcid = cpu_to_be16(cq->parent->idx);
16618 + config_opts.dcpid = cq->parent->dcp_idx;
16619 + config_opts.crem = query_result.crem;
16620 + config_opts.erem = query_result.erem;
16621 + config_opts.gpc_combine_flag = query_result.gpc_combine_flag;
16622 + config_opts.gpc_prio_a = query_result.gpc_prio_a;
16623 + config_opts.gpc_prio_b = query_result.gpc_prio_b;
16624 +
16625 + for (i = 0; i < 8; i++)
16626 + config_opts.w[i] = query_result.w[i];
16627 + config_opts.w[cq->idx - 8] = ((weight_code->y << 3) |
16628 + (weight_code->x & 0x7));
16629 + return qman_ceetm_configure_class_scheduler(&config_opts);
16630 +}
16631 +EXPORT_SYMBOL(qman_ceetm_set_queue_weight);
16632 +
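+/* Worked example of the weight byte written by qman_ceetm_set_queue_weight()
+ * above (illustrative values): the class scheduler stores one byte per
+ * grouped CQ as (y << 3) | (x & 0x7), so cq->idx = 10 with weight_code
+ * {x = 3, y = 12} programs w[2] = (12 << 3) | 3 = 0x63.
+ */
+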
16633 +int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
16634 + struct qm_ceetm_weight_code *weight_code)
16635 +{
16636 + struct qm_mcr_ceetm_class_scheduler_query query_result;
16637 +
16638 + if (cq->idx < 8) {
16639 + pr_err("Can not get weight for ungrouped class queue\n");
16640 + return -EINVAL;
16641 + }
16642 +
16643 + if (qman_ceetm_query_class_scheduler(cq->parent,
16644 + &query_result)) {
16645 + pr_err("Can't get the weight code for CQ#%d!\n", cq->idx);
16646 + return -EINVAL;
16647 + }
16648 + weight_code->y = query_result.w[cq->idx - 8] >> 3;
16649 + weight_code->x = query_result.w[cq->idx - 8] & 0x7;
16650 +
16651 + return 0;
16652 +}
16653 +EXPORT_SYMBOL(qman_ceetm_get_queue_weight);
16654 +
16655 +/* The WBFS code is represented as {x,y}; the effective weight can be calculated as:
16656 + * effective weight = 2^x / (1 - (y/64))
16657 + * = 2^(x+6) / (64 - y)
16658 + */
16659 +static void reduce_fraction(u32 *n, u32 *d)
16660 +{
16661 + u32 factor = 2;
16662 + u32 lesser = (*n < *d) ? *n : *d;
16663 + /* If factor exceeds the square-root of the lesser of *n and *d,
16664 + * then there's no point continuing. Proof: if there was a factor
16665 + * bigger than the square root, that would imply there exists
16666 + * another factor smaller than the square-root with which it
16667 + * multiplies to give 'lesser' - but that's a contradiction
16668 + * because the other factor would have already been found and
16669 + * divided out.
16670 + */
16671 + while ((factor * factor) <= lesser) {
16672 + /* If 'factor' is a factor of *n and *d, divide them both
16673 + * by 'factor' as many times as possible.
16674 + */
16675 + while (!(*n % factor) && !(*d % factor)) {
16676 + *n /= factor;
16677 + *d /= factor;
16678 + lesser /= factor;
16679 + }
16680 + if (factor == 2)
16681 + factor = 3;
16682 + else
16683 + factor += 2;
16684 + }
16685 +}
16686 +
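+/* Worked example (illustrative values): reduce_fraction() called with
+ * n = 512, d = 96 repeatedly divides out the common factor 2, leaving
+ * n = 16, d = 3, at which point factor * factor exceeds the smaller value
+ * and the loop terminates.
+ */
+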
16687 +int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
16688 + u32 *numerator,
16689 + u32 *denominator)
16690 +{
16691 + *numerator = (u32) 1 << (weight_code->x + 6);
16692 + *denominator = 64 - weight_code->y;
16693 + reduce_fraction(numerator, denominator);
16694 + return 0;
16695 +}
16696 +EXPORT_SYMBOL(qman_ceetm_wbfs2ratio);
16697 +
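+/* Worked example for qman_ceetm_wbfs2ratio() (illustrative values): weight
+ * code {x = 3, y = 32} gives numerator = 1 << 9 = 512 and
+ * denominator = 64 - 32 = 32, which reduce_fraction() turns into 16/1,
+ * i.e. an effective weight of 16.
+ */
+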
16698 +/* For a given x, the weight is between 2^x (inclusive) and 2^(x+1) (exclusive).
16699 + * So find 'x' by range, and then estimate 'y' using:
16700 + * 64 - y = 2^(x + 6) / weight
16701 + * = 2^(x + 6) / (n/d)
16702 + * = d * 2^(x+6) / n
16703 + * y = 64 - (d * 2^(x+6) / n)
16704 + */
16705 +int qman_ceetm_ratio2wbfs(u32 numerator,
16706 + u32 denominator,
16707 + struct qm_ceetm_weight_code *weight_code,
16708 + int rounding)
16709 +{
16710 + unsigned int y, x = 0;
16711 + /* search incrementing 'x' until:
16712 + * weight < 2^(x+1)
16713 + * n/d < 2^(x+1)
16714 + * n < d * 2^(x+1)
16715 + */
16716 + while ((x < 8) && (numerator >= (denominator << (x + 1))))
16717 + x++;
16718 + if (x >= 8)
16719 + return -ERANGE;
16720 + /* because of the subtraction, use '-rounding' */
16721 + y = 64 - ROUNDING(denominator << (x + 6), numerator, -rounding);
16722 + if (y >= 32)
16723 + return -ERANGE;
16724 + weight_code->x = x;
16725 + weight_code->y = y;
16726 + return 0;
16727 +}
16728 +EXPORT_SYMBOL(qman_ceetm_ratio2wbfs);
16729 +
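+/* Worked example for qman_ceetm_ratio2wbfs() (illustrative values, and
+ * assuming ROUNDING() rounds to the nearest integer when its rounding
+ * argument is 0): for a 3/2 ratio the search loop leaves x = 0 since
+ * 3 < (2 << 1), and y = 64 - ROUNDING(2 << 6, 3, 0) = 64 - 43 = 21, giving
+ * an effective weight of 64 / (64 - 21) = 64/43, roughly 1.49.
+ */
+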
16730 +int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio)
16731 +{
16732 + struct qm_ceetm_weight_code weight_code;
16733 +
16734 + if (qman_ceetm_ratio2wbfs(ratio, 100, &weight_code, 0)) {
16735 + pr_err("Cannot get wbfs code for cq %x\n", cq->idx);
16736 + return -EINVAL;
16737 + }
16738 + return qman_ceetm_set_queue_weight(cq, &weight_code);
16739 +}
16740 +EXPORT_SYMBOL(qman_ceetm_set_queue_weight_in_ratio);
16741 +
16742 +int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio)
16743 +{
16744 + struct qm_ceetm_weight_code weight_code;
16745 + u32 n, d;
16746 +
16747 + if (qman_ceetm_get_queue_weight(cq, &weight_code)) {
16748 + pr_err("Cannot query the weight code for cq%x\n", cq->idx);
16749 + return -EINVAL;
16750 + }
16751 +
16752 + if (qman_ceetm_wbfs2ratio(&weight_code, &n, &d)) {
16753 + pr_err("Cannot get the ratio with wbfs code\n");
16754 + return -EINVAL;
16755 + }
16756 +
16757 + *ratio = (n * 100) / d;
16758 + return 0;
16759 +}
16760 +EXPORT_SYMBOL(qman_ceetm_get_queue_weight_in_ratio);
16761 +
16762 +int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
16763 + u64 *frame_count, u64 *byte_count)
16764 +{
16765 + struct qm_mcr_ceetm_statistics_query result;
16766 + u16 cid, command_type;
16767 + enum qm_dc_portal dcp_idx;
16768 + int ret;
16769 +
16770 + cid = cpu_to_be16((cq->parent->idx << 4) | cq->idx);
16771 + dcp_idx = cq->parent->dcp_idx;
16772 + if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
16773 + command_type = CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS;
16774 + else
16775 + command_type = CEETM_QUERY_DEQUEUE_STATISTICS;
16776 +
16777 + ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
16778 + if (ret) {
16779 + pr_err("Can't query the statistics of CQ#%d!\n", cq->idx);
16780 + return -EINVAL;
16781 + }
16782 +
16783 + *frame_count = be40_to_cpu(result.frm_cnt);
16784 + *byte_count = be48_to_cpu(result.byte_cnt);
16785 + return 0;
16786 +}
16787 +EXPORT_SYMBOL(qman_ceetm_cq_get_dequeue_statistics);
16788 +
16789 +int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq)
16790 +{
16791 + struct qm_mcr_ceetm_cq_peek_pop_xsfdrread ppxr;
16792 + int ret;
16793 +
16794 + do {
16795 + ret = qman_ceetm_cq_peek_pop_xsfdrread(cq, 1, 0, &ppxr);
16796 + if (ret) {
16797 + pr_err("Failed to pop frame from CQ\n");
16798 + return -EINVAL;
16799 + }
16800 + } while (!(ppxr.stat & 0x2));
16801 +
16802 + return 0;
16803 +}
16804 +EXPORT_SYMBOL(qman_ceetm_drain_cq);
16805 +
16806 +#define CEETM_LFQMT_LFQID_MSB 0xF00000
16807 +#define CEETM_LFQMT_LFQID_LSB 0x000FFF
16808 +int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
16809 + struct qm_ceetm_cq *cq)
16810 +{
16811 + struct qm_ceetm_lfq *p;
16812 + u32 lfqid;
16813 + int ret = 0;
16814 + struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
16815 +
16816 + if (cq->parent->dcp_idx == qm_dc_portal_fman0) {
16817 + ret = qman_alloc_ceetm0_lfqid(&lfqid);
16818 + } else if (cq->parent->dcp_idx == qm_dc_portal_fman1) {
16819 + ret = qman_alloc_ceetm1_lfqid(&lfqid);
16820 + } else {
16821 + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
16822 + cq->parent->dcp_idx);
16823 + return -EINVAL;
16824 + }
16825 +
16826 + if (ret) {
16827 +		pr_err("There is no lfqid available for CQ#%d!\n", cq->idx);
16828 + return -ENODEV;
16829 + }
16830 + p = kmalloc(sizeof(*p), GFP_KERNEL);
16831 + if (!p)
16832 + return -ENOMEM;
16833 + p->idx = lfqid;
16834 + p->dctidx = (u16)(lfqid & CEETM_LFQMT_LFQID_LSB);
16835 + p->parent = cq->parent;
16836 + list_add_tail(&p->node, &cq->bound_lfqids);
16837 +
16838 + lfqmt_config.lfqid = cpu_to_be24(CEETM_LFQMT_LFQID_MSB |
16839 + (cq->parent->dcp_idx << 16) |
16840 + (lfqid & CEETM_LFQMT_LFQID_LSB));
16841 + lfqmt_config.cqid = cpu_to_be16((cq->parent->idx << 4) | (cq->idx));
16842 + lfqmt_config.dctidx = cpu_to_be16(p->dctidx);
16843 + if (qman_ceetm_configure_lfqmt(&lfqmt_config)) {
16844 + pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n",
16845 + lfqid, cq->idx);
16846 + list_del(&p->node);
16847 + kfree(p);
16848 + return -EINVAL;
16849 + }
16850 + *lfq = p;
16851 + return 0;
16852 +}
16853 +EXPORT_SYMBOL(qman_ceetm_lfq_claim);
16854 +
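+/* Worked example of the LFQMT command fields built by qman_ceetm_lfq_claim()
+ * above (illustrative values): with dcp_idx = 1 and an allocated lfqid whose
+ * low twelve bits are 0x123, p->dctidx = 0x123 and the command's lfqid field
+ * becomes 0xF00000 | (1 << 16) | 0x123 = 0xF10123 before the cpu_to_be24()
+ * conversion.
+ */
+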
16855 +int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq)
16856 +{
16857 + if (lfq->parent->dcp_idx == qm_dc_portal_fman0) {
16858 + qman_release_ceetm0_lfqid(lfq->idx);
16859 + } else if (lfq->parent->dcp_idx == qm_dc_portal_fman1) {
16860 + qman_release_ceetm1_lfqid(lfq->idx);
16861 + } else {
16862 + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
16863 + lfq->parent->dcp_idx);
16864 + return -EINVAL;
16865 + }
16866 + list_del(&lfq->node);
16867 + kfree(lfq);
16868 + return 0;
16869 +}
16870 +EXPORT_SYMBOL(qman_ceetm_lfq_release);
16871 +
16872 +int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq, u64 context_a,
16873 + u32 context_b)
16874 +{
16875 + struct qm_mcc_ceetm_dct_config dct_config;
16876 + lfq->context_a = context_a;
16877 + lfq->context_b = context_b;
16878 + dct_config.dctidx = cpu_to_be16((u16)lfq->dctidx);
16879 + dct_config.dcpid = lfq->parent->dcp_idx;
16880 + dct_config.context_b = cpu_to_be32(context_b);
16881 + dct_config.context_a = cpu_to_be64(context_a);
16882 +
16883 + return qman_ceetm_configure_dct(&dct_config);
16884 +}
16885 +EXPORT_SYMBOL(qman_ceetm_lfq_set_context);
16886 +
16887 +int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq, u64 *context_a,
16888 + u32 *context_b)
16889 +{
16890 + struct qm_mcc_ceetm_dct_query dct_query;
16891 + struct qm_mcr_ceetm_dct_query query_result;
16892 +
16893 + dct_query.dctidx = cpu_to_be16(lfq->dctidx);
16894 + dct_query.dcpid = lfq->parent->dcp_idx;
16895 + if (qman_ceetm_query_dct(&dct_query, &query_result)) {
16896 + pr_err("Can't query LFQID#%d's context!\n", lfq->idx);
16897 + return -EINVAL;
16898 + }
16899 + *context_a = be64_to_cpu(query_result.context_a);
16900 + *context_b = be32_to_cpu(query_result.context_b);
16901 + return 0;
16902 +}
16903 +EXPORT_SYMBOL(qman_ceetm_lfq_get_context);
16904 +
16905 +int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq)
16906 +{
16907 + spin_lock_init(&fq->fqlock);
16908 + fq->fqid = lfq->idx;
16909 + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
16910 + if (lfq->ern)
16911 + fq->cb.ern = lfq->ern;
16912 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
16913 + if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
16914 + return -ENOMEM;
16915 +#endif
16916 + return 0;
16917 +}
16918 +EXPORT_SYMBOL(qman_ceetm_create_fq);
16919 +
16920 +#define MAX_CCG_IDX 0x000F
16921 +int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
16922 + struct qm_ceetm_channel *channel,
16923 + unsigned int idx,
16924 + void (*cscn)(struct qm_ceetm_ccg *,
16925 + void *cb_ctx,
16926 + int congested),
16927 + void *cb_ctx)
16928 +{
16929 + struct qm_ceetm_ccg *p;
16930 +
16931 + if (idx > MAX_CCG_IDX) {
16932 + pr_err("The given ccg index is out of range\n");
16933 + return -EINVAL;
16934 + }
16935 +
16936 + list_for_each_entry(p, &channel->ccgs, node) {
16937 + if (p->idx == idx) {
16938 + pr_err("The CCG#%d has been claimed\n", idx);
16939 + return -EINVAL;
16940 + }
16941 + }
16942 +
16943 + p = kmalloc(sizeof(*p), GFP_KERNEL);
16944 + if (!p) {
16945 + pr_err("Can't allocate memory for CCG#%d!\n", idx);
16946 + return -ENOMEM;
16947 + }
16948 +
16949 + list_add_tail(&p->node, &channel->ccgs);
16950 +
16951 + p->idx = idx;
16952 + p->parent = channel;
16953 + p->cb = cscn;
16954 + p->cb_ctx = cb_ctx;
16955 + INIT_LIST_HEAD(&p->cb_node);
16956 +
16957 + *ccg = p;
16958 + return 0;
16959 +}
16960 +EXPORT_SYMBOL(qman_ceetm_ccg_claim);
16961 +
16962 +int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg)
16963 +{
16964 + unsigned long irqflags __maybe_unused;
16965 + struct qm_mcc_ceetm_ccgr_config config_opts;
16966 + int ret = 0;
16967 + struct qman_portal *p = get_affine_portal();
16968 +
16969 + memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
16970 + spin_lock_irqsave(&p->ccgr_lock, irqflags);
16971 + if (!list_empty(&ccg->cb_node))
16972 + list_del(&ccg->cb_node);
16973 + config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
16974 + (ccg->parent->idx << 4) | ccg->idx);
16975 + config_opts.dcpid = ccg->parent->dcp_idx;
16976 + config_opts.we_mask = cpu_to_be16(QM_CCGR_WE_CSCN_TUPD);
16977 + config_opts.cm_config.cscn_tupd = cpu_to_be16(PORTAL_IDX(p));
16978 + ret = qman_ceetm_configure_ccgr(&config_opts);
16979 + spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
16980 + put_affine_portal();
16981 +
16982 + list_del(&ccg->node);
16983 + kfree(ccg);
16984 + return ret;
16985 +}
16986 +EXPORT_SYMBOL(qman_ceetm_ccg_release);
16987 +
16988 +int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg, u16 we_mask,
16989 + const struct qm_ceetm_ccg_params *params)
16990 +{
16991 + struct qm_mcc_ceetm_ccgr_config config_opts;
16992 + unsigned long irqflags __maybe_unused;
16993 + int ret;
16994 + struct qman_portal *p;
16995 +
16996 + if (((ccg->parent->idx << 4) | ccg->idx) >= (2 * __CGR_NUM))
16997 + return -EINVAL;
16998 +
16999 + p = get_affine_portal();
17000 +
17001 + memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
17002 + spin_lock_irqsave(&p->ccgr_lock, irqflags);
17003 +
17004 + config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
17005 + (ccg->parent->idx << 4) | ccg->idx);
17006 + config_opts.dcpid = ccg->parent->dcp_idx;
17007 + config_opts.we_mask = we_mask;
17008 + if (we_mask & QM_CCGR_WE_CSCN_EN) {
17009 + config_opts.we_mask |= QM_CCGR_WE_CSCN_TUPD;
17010 + config_opts.cm_config.cscn_tupd = cpu_to_be16(
17011 + QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p));
17012 + }
17013 + config_opts.we_mask = cpu_to_be16(config_opts.we_mask);
17014 + config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
17015 + config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
17016 + config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
17017 + config_opts.cm_config.ctl_td_en = params->td_en;
17018 + config_opts.cm_config.ctl_td_mode = params->td_mode;
17019 + config_opts.cm_config.ctl_cscn_en = params->cscn_en;
17020 + config_opts.cm_config.ctl_mode = params->mode;
17021 + config_opts.cm_config.oal = params->oal;
17022 + config_opts.cm_config.cs_thres.hword =
17023 + cpu_to_be16(params->cs_thres_in.hword);
17024 + config_opts.cm_config.cs_thres_x.hword =
17025 + cpu_to_be16(params->cs_thres_out.hword);
17026 + config_opts.cm_config.td_thres.hword =
17027 + cpu_to_be16(params->td_thres.hword);
17028 + config_opts.cm_config.wr_parm_g.word =
17029 + cpu_to_be32(params->wr_parm_g.word);
17030 + config_opts.cm_config.wr_parm_y.word =
17031 + cpu_to_be32(params->wr_parm_y.word);
17032 + config_opts.cm_config.wr_parm_r.word =
17033 + cpu_to_be32(params->wr_parm_r.word);
17034 + ret = qman_ceetm_configure_ccgr(&config_opts);
17035 + if (ret) {
17036 + pr_err("Configure CCGR CM failed!\n");
17037 + goto release_lock;
17038 + }
17039 +
17040 + if (we_mask & QM_CCGR_WE_CSCN_EN)
17041 + if (list_empty(&ccg->cb_node))
17042 + list_add(&ccg->cb_node,
17043 + &p->ccgr_cbs[ccg->parent->dcp_idx]);
17044 +release_lock:
17045 + spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
17046 + put_affine_portal();
17047 + return ret;
17048 +}
17049 +EXPORT_SYMBOL(qman_ceetm_ccg_set);
17050 +
17051 +#define CEETM_CCGR_CTL_MASK 0x01
17052 +int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
17053 + struct qm_ceetm_ccg_params *params)
17054 +{
17055 + struct qm_mcc_ceetm_ccgr_query query_opts;
17056 + struct qm_mcr_ceetm_ccgr_query query_result;
17057 +
17058 + query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
17059 + (ccg->parent->idx << 4) | ccg->idx);
17060 + query_opts.dcpid = ccg->parent->dcp_idx;
17061 +
17062 + if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
17063 + pr_err("Can't query CCGR#%d\n", ccg->idx);
17064 + return -EINVAL;
17065 + }
17066 +
17067 + params->wr_parm_r.word = query_result.cm_query.wr_parm_r.word;
17068 + params->wr_parm_y.word = query_result.cm_query.wr_parm_y.word;
17069 + params->wr_parm_g.word = query_result.cm_query.wr_parm_g.word;
17070 + params->td_thres.hword = query_result.cm_query.td_thres.hword;
17071 + params->cs_thres_out.hword = query_result.cm_query.cs_thres_x.hword;
17072 + params->cs_thres_in.hword = query_result.cm_query.cs_thres.hword;
17073 + params->oal = query_result.cm_query.oal;
17074 + params->wr_en_g = query_result.cm_query.ctl_wr_en_g;
17075 + params->wr_en_y = query_result.cm_query.ctl_wr_en_y;
17076 + params->wr_en_r = query_result.cm_query.ctl_wr_en_r;
17077 + params->td_en = query_result.cm_query.ctl_td_en;
17078 + params->td_mode = query_result.cm_query.ctl_td_mode;
17079 + params->cscn_en = query_result.cm_query.ctl_cscn_en;
17080 + params->mode = query_result.cm_query.ctl_mode;
17081 +
17082 + return 0;
17083 +}
17084 +EXPORT_SYMBOL(qman_ceetm_ccg_get);
17085 +
17086 +int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
17087 + u64 *frame_count, u64 *byte_count)
17088 +{
17089 + struct qm_mcr_ceetm_statistics_query result;
17090 + u16 cid, command_type;
17091 + enum qm_dc_portal dcp_idx;
17092 + int ret;
17093 +
17094 + cid = cpu_to_be16((ccg->parent->idx << 4) | ccg->idx);
17095 + dcp_idx = ccg->parent->dcp_idx;
17096 + if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
17097 + command_type = CEETM_QUERY_REJECT_CLEAR_STATISTICS;
17098 + else
17099 + command_type = CEETM_QUERY_REJECT_STATISTICS;
17100 +
17101 + ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
17102 + if (ret) {
17103 + pr_err("Can't query the statistics of CCG#%d!\n", ccg->idx);
17104 + return -EINVAL;
17105 + }
17106 +
17107 + *frame_count = be40_to_cpu(result.frm_cnt);
17108 + *byte_count = be48_to_cpu(result.byte_cnt);
17109 + return 0;
17110 +}
17111 +EXPORT_SYMBOL(qman_ceetm_ccg_get_reject_statistics);
17112 +
17113 +int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
17114 + u16 swp_idx,
17115 + unsigned int *cscn_enabled)
17116 +{
17117 + struct qm_mcc_ceetm_ccgr_query query_opts;
17118 + struct qm_mcr_ceetm_ccgr_query query_result;
17119 + int i;
17120 +
17121 + DPA_ASSERT(swp_idx < 127);
17122 + query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
17123 + (ccg->parent->idx << 4) | ccg->idx);
17124 + query_opts.dcpid = ccg->parent->dcp_idx;
17125 +
17126 + if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
17127 + pr_err("Can't query CCGR#%d\n", ccg->idx);
17128 + return -EINVAL;
17129 + }
17130 +
17131 + i = swp_idx / 32;
17132 + i = 3 - i;
17133 +	*cscn_enabled = (query_result.cm_query.cscn_targ_swp[i] >>
17134 +		(31 - swp_idx % 32)) & 0x1;
17135 +
17136 + return 0;
17137 +}
17138 +EXPORT_SYMBOL(qman_ceetm_cscn_swp_get);
17139 +
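+/* Worked example of the index arithmetic in qman_ceetm_cscn_swp_get() above
+ * (illustrative value): for swp_idx = 40, i = 3 - (40 / 32) = 2 and the
+ * enable flag is bit 31 - (40 % 32) = 23 of cscn_targ_swp[2].
+ */
+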
17140 +int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
17141 + u16 dcp_idx,
17142 + u8 vcgid,
17143 + unsigned int cscn_enabled,
17144 + u16 we_mask,
17145 + const struct qm_ceetm_ccg_params *params)
17146 +{
17147 + struct qm_mcc_ceetm_ccgr_config config_opts;
17148 + int ret;
17149 +
17150 + config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
17151 + (ccg->parent->idx << 4) | ccg->idx);
17152 + config_opts.dcpid = ccg->parent->dcp_idx;
17153 + config_opts.we_mask = cpu_to_be16(we_mask | QM_CCGR_WE_CSCN_TUPD |
17154 + QM_CCGR_WE_CDV);
17155 + config_opts.cm_config.cdv = vcgid;
17156 + config_opts.cm_config.cscn_tupd = cpu_to_be16((cscn_enabled << 15) |
17157 + QM_CGR_TARG_UDP_CTRL_DCP | dcp_idx);
17158 + config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
17159 + config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
17160 + config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
17161 + config_opts.cm_config.ctl_td_en = params->td_en;
17162 + config_opts.cm_config.ctl_td_mode = params->td_mode;
17163 + config_opts.cm_config.ctl_cscn_en = params->cscn_en;
17164 + config_opts.cm_config.ctl_mode = params->mode;
17165 + config_opts.cm_config.cs_thres.hword =
17166 + cpu_to_be16(params->cs_thres_in.hword);
17167 + config_opts.cm_config.cs_thres_x.hword =
17168 + cpu_to_be16(params->cs_thres_out.hword);
17169 + config_opts.cm_config.td_thres.hword =
17170 + cpu_to_be16(params->td_thres.hword);
17171 + config_opts.cm_config.wr_parm_g.word =
17172 + cpu_to_be32(params->wr_parm_g.word);
17173 + config_opts.cm_config.wr_parm_y.word =
17174 + cpu_to_be32(params->wr_parm_y.word);
17175 + config_opts.cm_config.wr_parm_r.word =
17176 + cpu_to_be32(params->wr_parm_r.word);
17177 +
17178 + ret = qman_ceetm_configure_ccgr(&config_opts);
17179 + if (ret) {
17180 + pr_err("Configure CSCN_TARG_DCP failed!\n");
17181 + return -EINVAL;
17182 + }
17183 + return 0;
17184 +}
17185 +EXPORT_SYMBOL(qman_ceetm_cscn_dcp_set);
17186 +
17187 +int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
17188 + u16 dcp_idx,
17189 + u8 *vcgid,
17190 + unsigned int *cscn_enabled)
17191 +{
17192 + struct qm_mcc_ceetm_ccgr_query query_opts;
17193 + struct qm_mcr_ceetm_ccgr_query query_result;
17194 +
17195 + query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
17196 + (ccg->parent->idx << 4) | ccg->idx);
17197 + query_opts.dcpid = ccg->parent->dcp_idx;
17198 +
17199 + if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
17200 + pr_err("Can't query CCGR#%d\n", ccg->idx);
17201 + return -EINVAL;
17202 + }
17203 +
17204 + *vcgid = query_result.cm_query.cdv;
17205 + *cscn_enabled = (query_result.cm_query.cscn_targ_dcp >> dcp_idx) & 0x1;
17206 + return 0;
17207 +}
17208 +EXPORT_SYMBOL(qman_ceetm_cscn_dcp_get);
17209 +
17210 +int qman_ceetm_querycongestion(struct __qm_mcr_querycongestion *ccg_state,
17211 + unsigned int dcp_idx)
17212 +{
17213 + struct qm_mc_command *mcc;
17214 + struct qm_mc_result *mcr;
17215 + struct qman_portal *p;
17216 + unsigned long irqflags __maybe_unused;
17217 + u8 res;
17218 + int i, j;
17219 +
17220 + p = get_affine_portal();
17221 + PORTAL_IRQ_LOCK(p, irqflags);
17222 +
17223 + mcc = qm_mc_start(&p->p);
17224 + for (i = 0; i < 2; i++) {
17225 + mcc->ccgr_query.ccgrid =
17226 + cpu_to_be16(CEETM_QUERY_CONGESTION_STATE | i);
17227 + mcc->ccgr_query.dcpid = dcp_idx;
17228 + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
17229 +
17230 + while (!(mcr = qm_mc_result(&p->p)))
17231 + cpu_relax();
17232 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
17233 + QM_CEETM_VERB_CCGR_QUERY);
17234 + res = mcr->result;
17235 + if (res == QM_MCR_RESULT_OK) {
17236 + for (j = 0; j < 8; j++)
17237 + mcr->ccgr_query.congestion_state.state.
17238 + __state[j] = be32_to_cpu(mcr->ccgr_query.
17239 + congestion_state.state.__state[j]);
17240 + *(ccg_state + i) =
17241 + mcr->ccgr_query.congestion_state.state;
17242 + } else {
17243 + pr_err("QUERY CEETM CONGESTION STATE failed\n");
17244 + PORTAL_IRQ_UNLOCK(p, irqflags);
17245 + return -EIO;
17246 + }
17247 + }
17248 + PORTAL_IRQ_UNLOCK(p, irqflags);
17249 + put_affine_portal();
17250 + return 0;
17251 +}
17252 +
17253 +int qman_set_wpm(int wpm_enable)
17254 +{
17255 + return qm_set_wpm(wpm_enable);
17256 +}
17257 +EXPORT_SYMBOL(qman_set_wpm);
17258 +
17259 +int qman_get_wpm(int *wpm_enable)
17260 +{
17261 + return qm_get_wpm(wpm_enable);
17262 +}
17263 +EXPORT_SYMBOL(qman_get_wpm);
17264 +
17265 +int qman_shutdown_fq(u32 fqid)
17266 +{
17267 + struct qman_portal *p;
17268 + unsigned long irqflags __maybe_unused;
17269 + int ret;
17270 + struct qm_portal *low_p;
17271 + p = get_affine_portal();
17272 + PORTAL_IRQ_LOCK(p, irqflags);
17273 + low_p = &p->p;
17274 + ret = qm_shutdown_fq(&low_p, 1, fqid);
17275 + PORTAL_IRQ_UNLOCK(p, irqflags);
17276 + put_affine_portal();
17277 + return ret;
17278 +}
17279 +
17280 +const struct qm_portal_config *qman_get_qm_portal_config(
17281 + struct qman_portal *portal)
17282 +{
17283 + return portal->sharing_redirect ? NULL : portal->config;
17284 +}
17285 --- /dev/null
17286 +++ b/drivers/staging/fsl_qbman/qman_low.h
17287 @@ -0,0 +1,1427 @@
17288 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
17289 + *
17290 + * Redistribution and use in source and binary forms, with or without
17291 + * modification, are permitted provided that the following conditions are met:
17292 + * * Redistributions of source code must retain the above copyright
17293 + * notice, this list of conditions and the following disclaimer.
17294 + * * Redistributions in binary form must reproduce the above copyright
17295 + * notice, this list of conditions and the following disclaimer in the
17296 + * documentation and/or other materials provided with the distribution.
17297 + * * Neither the name of Freescale Semiconductor nor the
17298 + * names of its contributors may be used to endorse or promote products
17299 + * derived from this software without specific prior written permission.
17300 + *
17301 + *
17302 + * ALTERNATIVELY, this software may be distributed under the terms of the
17303 + * GNU General Public License ("GPL") as published by the Free Software
17304 + * Foundation, either version 2 of that License or (at your option) any
17305 + * later version.
17306 + *
17307 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
17308 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17309 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17310 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
17311 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17312 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
17313 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
17314 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
17315 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
17316 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
17317 + */
17318 +
17319 +#include "qman_private.h"
17320 +
17321 +/***************************/
17322 +/* Portal register assists */
17323 +/***************************/
17324 +
17325 +/* Cache-inhibited register offsets */
17326 +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
17327 +
17328 +#define QM_REG_EQCR_PI_CINH 0x0000
17329 +#define QM_REG_EQCR_CI_CINH 0x0004
17330 +#define QM_REG_EQCR_ITR 0x0008
17331 +#define QM_REG_DQRR_PI_CINH 0x0040
17332 +#define QM_REG_DQRR_CI_CINH 0x0044
17333 +#define QM_REG_DQRR_ITR 0x0048
17334 +#define QM_REG_DQRR_DCAP 0x0050
17335 +#define QM_REG_DQRR_SDQCR 0x0054
17336 +#define QM_REG_DQRR_VDQCR 0x0058
17337 +#define QM_REG_DQRR_PDQCR 0x005c
17338 +#define QM_REG_MR_PI_CINH 0x0080
17339 +#define QM_REG_MR_CI_CINH 0x0084
17340 +#define QM_REG_MR_ITR 0x0088
17341 +#define QM_REG_CFG 0x0100
17342 +#define QM_REG_ISR 0x0e00
17343 +#define QM_REG_IIR 0x0e0c
17344 +#define QM_REG_ITPR 0x0e14
17345 +
17346 +/* Cache-enabled register offsets */
17347 +#define QM_CL_EQCR 0x0000
17348 +#define QM_CL_DQRR 0x1000
17349 +#define QM_CL_MR 0x2000
17350 +#define QM_CL_EQCR_PI_CENA 0x3000
17351 +#define QM_CL_EQCR_CI_CENA 0x3100
17352 +#define QM_CL_DQRR_PI_CENA 0x3200
17353 +#define QM_CL_DQRR_CI_CENA 0x3300
17354 +#define QM_CL_MR_PI_CENA 0x3400
17355 +#define QM_CL_MR_CI_CENA 0x3500
17356 +#define QM_CL_CR 0x3800
17357 +#define QM_CL_RR0 0x3900
17358 +#define QM_CL_RR1 0x3940
17359 +
17360 +#endif
17361 +
17362 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
17363 +
17364 +#define QM_REG_EQCR_PI_CINH 0x3000
17365 +#define QM_REG_EQCR_CI_CINH 0x3040
17366 +#define QM_REG_EQCR_ITR 0x3080
17367 +#define QM_REG_DQRR_PI_CINH 0x3100
17368 +#define QM_REG_DQRR_CI_CINH 0x3140
17369 +#define QM_REG_DQRR_ITR 0x3180
17370 +#define QM_REG_DQRR_DCAP 0x31C0
17371 +#define QM_REG_DQRR_SDQCR 0x3200
17372 +#define QM_REG_DQRR_VDQCR 0x3240
17373 +#define QM_REG_DQRR_PDQCR 0x3280
17374 +#define QM_REG_MR_PI_CINH 0x3300
17375 +#define QM_REG_MR_CI_CINH 0x3340
17376 +#define QM_REG_MR_ITR 0x3380
17377 +#define QM_REG_CFG 0x3500
17378 +#define QM_REG_ISR 0x3600
17379 +#define QM_REG_IIR 0x36C0
17380 +#define QM_REG_ITPR 0x3740
17381 +
17382 +/* Cache-enabled register offsets */
17383 +#define QM_CL_EQCR 0x0000
17384 +#define QM_CL_DQRR 0x1000
17385 +#define QM_CL_MR 0x2000
17386 +#define QM_CL_EQCR_PI_CENA 0x3000
17387 +#define QM_CL_EQCR_CI_CENA 0x3040
17388 +#define QM_CL_DQRR_PI_CENA 0x3100
17389 +#define QM_CL_DQRR_CI_CENA 0x3140
17390 +#define QM_CL_MR_PI_CENA 0x3300
17391 +#define QM_CL_MR_CI_CENA 0x3340
17392 +#define QM_CL_CR 0x3800
17393 +#define QM_CL_RR0 0x3900
17394 +#define QM_CL_RR1 0x3940
17395 +
17396 +#endif
17397 +
17398 +
17399 +/* BTW, the drivers (and h/w programming model) already obtain the required
17400 + * synchronisation for portal accesses via lwsync(), hwsync(), and
17401 + * data-dependencies. Use of barrier()s or other order-preserving primitives
17402 + * simply degrade performance. Hence the use of the __raw_*() interfaces, which
17403 + * simply ensure that the compiler treats the portal registers as volatile (ie.
17404 + * non-coherent). */
17405 +
17406 +/* Cache-inhibited register access. */
17407 +#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ci + (o)))
17408 +#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \
17409 + (qm)->addr_ci + (o));
17410 +#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
17411 +#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
17412 +
17413 +/* Cache-enabled (index) register access */
17414 +#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o))
17415 +#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o))
17416 +#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ce + (o)))
17417 +#define __qm_cl_out(qm, o, val) \
17418 + do { \
17419 + u32 *__tmpclout = (qm)->addr_ce + (o); \
17420 + __raw_writel(cpu_to_be32(val), __tmpclout); \
17421 + dcbf(__tmpclout); \
17422 + } while (0)
17423 +#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o))
17424 +#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
17425 +#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
17426 +#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
17427 +#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
17428 +#define qm_cl_invalidate(reg)\
17429 + __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
17430 +
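+/* Usage sketch for the accessors above (illustrative): inside a function
+ * with a 'portal' pointer, qm_out(EQCR_ITR, 4) expands to
+ * __raw_writel(cpu_to_be32(4), portal->addr.addr_ci + QM_REG_EQCR_ITR),
+ * and qm_in(EQCR_PI_CINH) performs the matching big-endian read.
+ */
+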
17431 +/* Cache-enabled ring access */
17432 +#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
17433 +
17434 +/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
17435 + * analysis, look at using the "extra" bit in the ring index registers to avoid
17436 + * cyclic issues. */
17437 +static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
17438 +{
17439 + /* 'first' is included, 'last' is excluded */
17440 + if (first <= last)
17441 + return last - first;
17442 + return ringsize + last - first;
17443 +}
17444 +
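+/* Worked example for qm_cyc_diff() (illustrative values): with ringsize = 8,
+ * first = 6 and last = 2 the helper returns 8 + 2 - 6 = 4, i.e. entries
+ * 6, 7, 0 and 1 ('first' included, 'last' excluded).
+ */
+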
17445 +/* Portal modes.
17446 + * Enum types:
17447 + *   pmode == production mode
17448 + *   cmode == consumption mode
17449 + * dmode == h/w dequeue mode.
17450 + * Enum values use 3 letter codes. First letter matches the portal mode,
17451 + * remaining two letters indicate;
17452 + * ci == cache-inhibited portal register
17453 + * ce == cache-enabled portal register
17454 + * vb == in-band valid-bit (cache-enabled)
17455 + * dc == DCA (Discrete Consumption Acknowledgement), DQRR-only
17456 + * As for "enum qm_dqrr_dmode", it should be self-explanatory.
17457 + */
17458 +enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
17459 + qm_eqcr_pci = 0, /* PI index, cache-inhibited */
17460 + qm_eqcr_pce = 1, /* PI index, cache-enabled */
17461 + qm_eqcr_pvb = 2 /* valid-bit */
17462 +};
17463 +enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
17464 + qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
17465 + qm_dqrr_dpull = 1 /* PDQCR */
17466 +};
17467 +enum qm_dqrr_pmode { /* s/w-only */
17468 + qm_dqrr_pci, /* reads DQRR_PI_CINH */
17469 + qm_dqrr_pce, /* reads DQRR_PI_CENA */
17470 + qm_dqrr_pvb /* reads valid-bit */
17471 +};
17472 +enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
17473 + qm_dqrr_cci = 0, /* CI index, cache-inhibited */
17474 + qm_dqrr_cce = 1, /* CI index, cache-enabled */
17475 + qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgement */
17476 +};
17477 +enum qm_mr_pmode { /* s/w-only */
17478 + qm_mr_pci, /* reads MR_PI_CINH */
17479 + qm_mr_pce, /* reads MR_PI_CENA */
17480 + qm_mr_pvb /* reads valid-bit */
17481 +};
17482 +enum qm_mr_cmode { /* matches QCSP_CFG::MM */
17483 + qm_mr_cci = 0, /* CI index, cache-inhibited */
17484 + qm_mr_cce = 1 /* CI index, cache-enabled */
17485 +};
17486 +
17487 +
17488 +/* ------------------------- */
17489 +/* --- Portal structures --- */
17490 +
17491 +#define QM_EQCR_SIZE 8
17492 +#define QM_DQRR_SIZE 16
17493 +#define QM_MR_SIZE 8
17494 +
17495 +struct qm_eqcr {
17496 + struct qm_eqcr_entry *ring, *cursor;
17497 + u8 ci, available, ithresh, vbit;
17498 +#ifdef CONFIG_FSL_DPA_CHECKING
17499 + u32 busy;
17500 + enum qm_eqcr_pmode pmode;
17501 +#endif
17502 +};
17503 +
17504 +struct qm_dqrr {
17505 + const struct qm_dqrr_entry *ring, *cursor;
17506 + u8 pi, ci, fill, ithresh, vbit;
17507 +#ifdef CONFIG_FSL_DPA_CHECKING
17508 + enum qm_dqrr_dmode dmode;
17509 + enum qm_dqrr_pmode pmode;
17510 + enum qm_dqrr_cmode cmode;
17511 +#endif
17512 +};
17513 +
17514 +struct qm_mr {
17515 + const struct qm_mr_entry *ring, *cursor;
17516 + u8 pi, ci, fill, ithresh, vbit;
17517 +#ifdef CONFIG_FSL_DPA_CHECKING
17518 + enum qm_mr_pmode pmode;
17519 + enum qm_mr_cmode cmode;
17520 +#endif
17521 +};
17522 +
17523 +struct qm_mc {
17524 + struct qm_mc_command *cr;
17525 + struct qm_mc_result *rr;
17526 + u8 rridx, vbit;
17527 +#ifdef CONFIG_FSL_DPA_CHECKING
17528 + enum {
17529 + /* Can be _mc_start()ed */
17530 + qman_mc_idle,
17531 + /* Can be _mc_commit()ed or _mc_abort()ed */
17532 + qman_mc_user,
17533 + /* Can only be _mc_retry()ed */
17534 + qman_mc_hw
17535 + } state;
17536 +#endif
17537 +};
17538 +
17539 +#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
17540 +
17541 +struct qm_addr {
17542 + void __iomem *addr_ce; /* cache-enabled */
17543 + void __iomem *addr_ci; /* cache-inhibited */
17544 +};
17545 +
17546 +struct qm_portal {
17547 + /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to
17548 + * and including 'mc' fits within a cacheline (yay!). The 'config' part
17549 +	 * is setup-only, so isn't a cause for concern. In other words, don't
17550 + * rearrange this structure on a whim, there be dragons ... */
17551 + struct qm_addr addr;
17552 + struct qm_eqcr eqcr;
17553 + struct qm_dqrr dqrr;
17554 + struct qm_mr mr;
17555 + struct qm_mc mc;
17556 +} QM_PORTAL_ALIGNMENT;
17557 +
17558 +
17559 +/* ---------------- */
17560 +/* --- EQCR API --- */
17561 +
17562 +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
17563 +#define EQCR_CARRYCLEAR(p) \
17564 + (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
17565 +
17566 +/* Bit-wise logic to convert a ring pointer to a ring index */
17567 +static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
17568 +{
17569 + return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
17570 +}
17571 +
17572 +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
17573 +static inline void EQCR_INC(struct qm_eqcr *eqcr)
17574 +{
17575 + /* NB: this is odd-looking, but experiments show that it generates fast
17576 + * code with essentially no branching overheads. We increment to the
17577 + * next EQCR pointer and handle overflow and 'vbit'. */
17578 + struct qm_eqcr_entry *partial = eqcr->cursor + 1;
17579 + eqcr->cursor = EQCR_CARRYCLEAR(partial);
17580 + if (partial != eqcr->cursor)
17581 + eqcr->vbit ^= QM_EQCR_VERB_VBIT;
17582 +}
17583 +
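+/* Worked example for EQCR_CARRYCLEAR()/EQCR_INC() above (assuming the ring
+ * base is naturally aligned, as implied by QM_CL_EQCR): with 8 entries of
+ * 64 bytes each, advancing the cursor past entry 7 produces base + 0x200;
+ * EQCR_CARRYCLEAR() masks off that carry bit so the cursor wraps back to the
+ * base, and the resulting mismatch against 'partial' is what triggers the
+ * valid-bit toggle in EQCR_INC().
+ */
+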
17584 +static inline int qm_eqcr_init(struct qm_portal *portal,
17585 + enum qm_eqcr_pmode pmode,
17586 + unsigned int eq_stash_thresh,
17587 + int eq_stash_prio)
17588 +{
17589 + /* This use of 'register', as well as all other occurrences, is because
17590 + * it has been observed to generate much faster code with gcc than is
17591 + * otherwise the case. */
17592 + register struct qm_eqcr *eqcr = &portal->eqcr;
17593 + u32 cfg;
17594 + u8 pi;
17595 +
17596 + eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR;
17597 + eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
17598 + qm_cl_invalidate(EQCR_CI);
17599 + pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
17600 + eqcr->cursor = eqcr->ring + pi;
17601 + eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
17602 + QM_EQCR_VERB_VBIT : 0;
17603 + eqcr->available = QM_EQCR_SIZE - 1 -
17604 + qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
17605 + eqcr->ithresh = qm_in(EQCR_ITR);
17606 +#ifdef CONFIG_FSL_DPA_CHECKING
17607 + eqcr->busy = 0;
17608 + eqcr->pmode = pmode;
17609 +#endif
17610 + cfg = (qm_in(CFG) & 0x00ffffff) |
17611 + (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
17612 + (eq_stash_prio << 26) | /* QCSP_CFG: EP */
17613 + ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
17614 + qm_out(CFG, cfg);
17615 + return 0;
17616 +}
17617 +
17618 +static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
17619 +{
17620 + return (qm_in(CFG) >> 28) & 0x7;
17621 +}
17622 +
17623 +static inline void qm_eqcr_finish(struct qm_portal *portal)
17624 +{
17625 + register struct qm_eqcr *eqcr = &portal->eqcr;
17626 + u8 pi, ci;
17627 + u32 cfg;
17628 +
17629 + /*
17630 + * Disable EQCI stashing because the QMan only
17631 + * presents the value it previously stashed to
17632 + * maintain coherency. Setting the stash threshold
17633 +	 * to 1 then 0 ensures that QMan has resynchronized
17634 + * its internal copy so that the portal is clean
17635 + * when it is reinitialized in the future
17636 + */
17637 + cfg = (qm_in(CFG) & 0x0fffffff) |
17638 + (1 << 28); /* QCSP_CFG: EST */
17639 + qm_out(CFG, cfg);
17640 + cfg &= 0x0fffffff; /* stash threshold = 0 */
17641 + qm_out(CFG, cfg);
17642 +
17643 + pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
17644 + ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
17645 +
17646 + /* Refresh EQCR CI cache value */
17647 + qm_cl_invalidate(EQCR_CI);
17648 + eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
17649 +
17650 + DPA_ASSERT(!eqcr->busy);
17651 + if (pi != EQCR_PTR2IDX(eqcr->cursor))
17652 +		pr_crit("losing uncommitted EQCR entries\n");
17653 + if (ci != eqcr->ci)
17654 + pr_crit("missing existing EQCR completions\n");
17655 + if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
17656 + pr_crit("EQCR destroyed unquiesced\n");
17657 +}
17658 +
17659 +static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
17660 + *portal)
17661 +{
17662 + register struct qm_eqcr *eqcr = &portal->eqcr;
17663 + DPA_ASSERT(!eqcr->busy);
17664 + if (!eqcr->available)
17665 + return NULL;
17666 +
17667 +
17668 +#ifdef CONFIG_FSL_DPA_CHECKING
17669 + eqcr->busy = 1;
17670 +#endif
17671 +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
17672 + dcbz_64(eqcr->cursor);
17673 +#endif
17674 + return eqcr->cursor;
17675 +}
17676 +
17677 +static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
17678 + *portal)
17679 +{
17680 + register struct qm_eqcr *eqcr = &portal->eqcr;
17681 + u8 diff, old_ci;
17682 +
17683 + DPA_ASSERT(!eqcr->busy);
17684 + if (!eqcr->available) {
17685 + old_ci = eqcr->ci;
17686 + eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
17687 + diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
17688 + eqcr->available += diff;
17689 + if (!diff)
17690 + return NULL;
17691 + }
17692 +#ifdef CONFIG_FSL_DPA_CHECKING
17693 + eqcr->busy = 1;
17694 +#endif
17695 +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
17696 + dcbz_64(eqcr->cursor);
17697 +#endif
17698 + return eqcr->cursor;
17699 +}
17700 +
17701 +static inline void qm_eqcr_abort(struct qm_portal *portal)
17702 +{
17703 + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
17704 + DPA_ASSERT(eqcr->busy);
17705 +#ifdef CONFIG_FSL_DPA_CHECKING
17706 + eqcr->busy = 0;
17707 +#endif
17708 +}
17709 +
17710 +static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
17711 + struct qm_portal *portal, u8 myverb)
17712 +{
17713 + register struct qm_eqcr *eqcr = &portal->eqcr;
17714 + DPA_ASSERT(eqcr->busy);
17715 + DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
17716 + if (eqcr->available == 1)
17717 + return NULL;
17718 + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
17719 + dcbf(eqcr->cursor);
17720 + EQCR_INC(eqcr);
17721 + eqcr->available--;
17722 +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
17723 + dcbz_64(eqcr->cursor);
17724 +#endif
17725 + return eqcr->cursor;
17726 +}
17727 +
17728 +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
17729 +#define EQCR_COMMIT_CHECKS(eqcr) \
17730 +do { \
17731 + DPA_ASSERT(eqcr->busy); \
17732 + DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0xffffff00)); \
17733 + DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0xffffff00)); \
17734 +} while (0)
17735 +#else
17736 +#define EQCR_COMMIT_CHECKS(eqcr) \
17737 +do { \
17738 + DPA_ASSERT(eqcr->busy); \
17739 + DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & \
17740 + cpu_to_be32(0x00ffffff))); \
17741 + DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & \
17742 + cpu_to_be32(0x00ffffff))); \
17743 +} while (0)
17744 +#endif
17745 +
17746 +static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
17747 +{
17748 + register struct qm_eqcr *eqcr = &portal->eqcr;
17749 + EQCR_COMMIT_CHECKS(eqcr);
17750 + DPA_ASSERT(eqcr->pmode == qm_eqcr_pci);
17751 + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
17752 + EQCR_INC(eqcr);
17753 + eqcr->available--;
17754 + dcbf(eqcr->cursor);
17755 + hwsync();
17756 + qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
17757 +#ifdef CONFIG_FSL_DPA_CHECKING
17758 + eqcr->busy = 0;
17759 +#endif
17760 +}
17761 +
17762 +static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
17763 +{
17764 + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
17765 + DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
17766 + qm_cl_invalidate(EQCR_PI);
17767 + qm_cl_touch_rw(EQCR_PI);
17768 +}
17769 +
17770 +static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
17771 +{
17772 + register struct qm_eqcr *eqcr = &portal->eqcr;
17773 + EQCR_COMMIT_CHECKS(eqcr);
17774 + DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
17775 + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
17776 + EQCR_INC(eqcr);
17777 + eqcr->available--;
17778 + dcbf(eqcr->cursor);
17779 + lwsync();
17780 + qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
17781 +#ifdef CONFIG_FSL_DPA_CHECKING
17782 + eqcr->busy = 0;
17783 +#endif
17784 +}
17785 +
17786 +static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
17787 +{
17788 + register struct qm_eqcr *eqcr = &portal->eqcr;
17789 + struct qm_eqcr_entry *eqcursor;
17790 + EQCR_COMMIT_CHECKS(eqcr);
17791 + DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
17792 + lwsync();
17793 + eqcursor = eqcr->cursor;
17794 + eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
17795 + dcbf(eqcursor);
17796 + EQCR_INC(eqcr);
17797 + eqcr->available--;
17798 +#ifdef CONFIG_FSL_DPA_CHECKING
17799 + eqcr->busy = 0;
17800 +#endif
17801 +}
17802 +
17803 +static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
17804 +{
17805 + register struct qm_eqcr *eqcr = &portal->eqcr;
17806 + u8 diff, old_ci = eqcr->ci;
17807 + eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
17808 + diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
17809 + eqcr->available += diff;
17810 + return diff;
17811 +}
17812 +
17813 +static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
17814 +{
17815 + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
17816 + qm_cl_touch_ro(EQCR_CI);
17817 +}
17818 +
17819 +static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
17820 +{
17821 + register struct qm_eqcr *eqcr = &portal->eqcr;
17822 + u8 diff, old_ci = eqcr->ci;
17823 + eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
17824 + qm_cl_invalidate(EQCR_CI);
17825 + diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
17826 + eqcr->available += diff;
17827 + return diff;
17828 +}
17829 +
17830 +static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
17831 +{
17832 + register struct qm_eqcr *eqcr = &portal->eqcr;
17833 + return eqcr->ithresh;
17834 +}
17835 +
17836 +static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
17837 +{
17838 + register struct qm_eqcr *eqcr = &portal->eqcr;
17839 + eqcr->ithresh = ithresh;
17840 + qm_out(EQCR_ITR, ithresh);
17841 +}
17842 +
17843 +static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
17844 +{
17845 + register struct qm_eqcr *eqcr = &portal->eqcr;
17846 + return eqcr->available;
17847 +}
17848 +
17849 +static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
17850 +{
17851 + register struct qm_eqcr *eqcr = &portal->eqcr;
17852 + return QM_EQCR_SIZE - 1 - eqcr->available;
17853 +}
17854 +
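/*
 * Usage sketch (illustrative only, not a driver API): a minimal enqueue
 * built on the EQCR helpers above. Assumes 'portal' is an initialised
 * qm_portal whose EQCR uses the pvb commit mode; the FQID value and the
 * function name are examples. A real enqueue (see qman_high.c) would also
 * fill in the frame descriptor, in hardware byte order, before committing.
 */
static inline int example_eqcr_enqueue(struct qm_portal *portal, u32 fqid)
{
	struct qm_eqcr_entry *eq = qm_eqcr_start_stash(portal);

	if (!eq)
		return -EBUSY;	/* ring full; caller should retry later */
	eq->fqid = cpu_to_be32(fqid);
	/* the verb byte is written last, by the commit helper */
	qm_eqcr_pvb_commit(portal, QM_EQCR_VERB_CMD_ENQUEUE);
	return 0;
}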
17855 +
17856 +/* ---------------- */
17857 +/* --- DQRR API --- */
17858 +
17859 +/* FIXME: many possible improvements;
17860 + * - look at changing the API to use pointers rather than index parameters now
17861 + *   that 'cursor' is a pointer,
17862 + * - consider moving other parameters to pointers if it could help (ci)
17863 + */
17864 +
17865 +#define DQRR_CARRYCLEAR(p) \
17866 + (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
17867 +
17868 +static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
17869 +{
17870 + return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
17871 +}
17872 +
17873 +static inline const struct qm_dqrr_entry *DQRR_INC(
17874 + const struct qm_dqrr_entry *e)
17875 +{
17876 + return DQRR_CARRYCLEAR(e + 1);
17877 +}
17878 +
17879 +static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
17880 +{
17881 + qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
17882 + ((mf & (QM_DQRR_SIZE - 1)) << 20));
17883 +}
17884 +
17885 +static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
17886 +{
17887 + register struct qm_dqrr *dqrr = &portal->dqrr;
17888 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
17889 + dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
17890 + qm_out(DQRR_CI_CINH, dqrr->ci);
17891 +}
17892 +
17893 +static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
17894 +{
17895 + register struct qm_dqrr *dqrr = &portal->dqrr;
17896 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
17897 + dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
17898 + qm_cl_out(DQRR_CI, dqrr->ci);
17899 +}
17900 +
17901 +static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
17902 +{
17903 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
17904 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
17905 + qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
17906 + ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
17907 + dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
17908 + dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
17909 +}
17910 +
17911 +static inline int qm_dqrr_init(struct qm_portal *portal,
17912 + const struct qm_portal_config *config,
17913 + enum qm_dqrr_dmode dmode,
17914 + __maybe_unused enum qm_dqrr_pmode pmode,
17915 + enum qm_dqrr_cmode cmode, u8 max_fill)
17916 +{
17917 + register struct qm_dqrr *dqrr = &portal->dqrr;
17918 + u32 cfg;
17919 +
17920 + /* Make sure the DQRR will be idle when we enable */
17921 + qm_out(DQRR_SDQCR, 0);
17922 + qm_out(DQRR_VDQCR, 0);
17923 + qm_out(DQRR_PDQCR, 0);
17924 + dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR;
17925 + dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
17926 + dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
17927 + dqrr->cursor = dqrr->ring + dqrr->ci;
17928 + dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
17929 + dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
17930 + QM_DQRR_VERB_VBIT : 0;
17931 + dqrr->ithresh = qm_in(DQRR_ITR);
17932 +
17933 + /* Free up pending DQRR entries if any as per current DCM */
17934 + if (dqrr->fill) {
17935 + enum qm_dqrr_cmode dcm = (qm_in(CFG) >> 16) & 3;
17936 +
17937 +#ifdef CONFIG_FSL_DPA_CHECKING
17938 + dqrr->cmode = dcm;
17939 +#endif
17940 + switch (dcm) {
17941 + case qm_dqrr_cci:
17942 + qm_dqrr_cci_consume(portal, dqrr->fill);
17943 + break;
17944 + case qm_dqrr_cce:
17945 + qm_dqrr_cce_consume(portal, dqrr->fill);
17946 + break;
17947 + case qm_dqrr_cdc:
17948 + qm_dqrr_cdc_consume_n(portal, (QM_DQRR_SIZE - 1));
17949 + break;
17950 + default:
17951 + DPA_ASSERT(0);
17952 + }
17953 + }
17954 +
17955 +#ifdef CONFIG_FSL_DPA_CHECKING
17956 + dqrr->dmode = dmode;
17957 + dqrr->pmode = pmode;
17958 + dqrr->cmode = cmode;
17959 +#endif
17960 + /* Invalidate every ring entry before beginning */
17961 + for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
17962 + dcbi(qm_cl(dqrr->ring, cfg));
17963 + cfg = (qm_in(CFG) & 0xff000f00) |
17964 + ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
17965 + ((dmode & 1) << 18) | /* DP */
17966 + ((cmode & 3) << 16) | /* DCM */
17967 + 0xa0 | /* RE+SE */
17968 + (0 ? 0x40 : 0) | /* Ignore RP */
17969 + (0 ? 0x10 : 0); /* Ignore SP */
17970 + qm_out(CFG, cfg);
17971 + qm_dqrr_set_maxfill(portal, max_fill);
17972 + return 0;
17973 +}
17974 +
17975 +static inline void qm_dqrr_finish(struct qm_portal *portal)
17976 +{
17977 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
17978 +#ifdef CONFIG_FSL_DPA_CHECKING
17979 + if ((dqrr->cmode != qm_dqrr_cdc) &&
17980 + (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
17981 + pr_crit("Ignoring completed DQRR entries\n");
17982 +#endif
17983 +}
17984 +
17985 +static inline const struct qm_dqrr_entry *qm_dqrr_current(
17986 + struct qm_portal *portal)
17987 +{
17988 + register struct qm_dqrr *dqrr = &portal->dqrr;
17989 + if (!dqrr->fill)
17990 + return NULL;
17991 + return dqrr->cursor;
17992 +}
17993 +
17994 +static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
17995 +{
17996 + register struct qm_dqrr *dqrr = &portal->dqrr;
17997 + return DQRR_PTR2IDX(dqrr->cursor);
17998 +}
17999 +
18000 +static inline u8 qm_dqrr_next(struct qm_portal *portal)
18001 +{
18002 + register struct qm_dqrr *dqrr = &portal->dqrr;
18003 + DPA_ASSERT(dqrr->fill);
18004 + dqrr->cursor = DQRR_INC(dqrr->cursor);
18005 + return --dqrr->fill;
18006 +}
18007 +
18008 +static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
18009 +{
18010 + register struct qm_dqrr *dqrr = &portal->dqrr;
18011 + u8 diff, old_pi = dqrr->pi;
18012 + DPA_ASSERT(dqrr->pmode == qm_dqrr_pci);
18013 + dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
18014 + diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
18015 + dqrr->fill += diff;
18016 + return diff;
18017 +}
18018 +
18019 +static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
18020 +{
18021 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
18022 + DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
18023 + qm_cl_invalidate(DQRR_PI);
18024 + qm_cl_touch_ro(DQRR_PI);
18025 +}
18026 +
18027 +static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
18028 +{
18029 + register struct qm_dqrr *dqrr = &portal->dqrr;
18030 + u8 diff, old_pi = dqrr->pi;
18031 + DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
18032 + dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
18033 + diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
18034 + dqrr->fill += diff;
18035 + return diff;
18036 +}
18037 +
18038 +static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
18039 +{
18040 + register struct qm_dqrr *dqrr = &portal->dqrr;
18041 + const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
18042 + DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
18043 +#if (defined CONFIG_PPC || defined CONFIG_PPC64) && !defined CONFIG_FSL_PAMU
18044 + /*
18045 +	 * On PowerPC platforms, if PAMU is not available, we need to
18046 +	 * manually invalidate the cache. When PAMU is available, the
18047 +	 * cache is updated by stashing operations generated by QMan.
18048 + */
18049 + dcbi(res);
18050 + dcbt_ro(res);
18051 +#endif
18052 +
18053 + /* when accessing 'verb', use __raw_readb() to ensure that compiler
18054 + * inlining doesn't try to optimise out "excess reads". */
18055 + if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
18056 + dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
18057 + if (!dqrr->pi)
18058 + dqrr->vbit ^= QM_DQRR_VERB_VBIT;
18059 + dqrr->fill++;
18060 + }
18061 +}
18062 +
18063 +
18064 +static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
18065 +{
18066 + register struct qm_dqrr *dqrr = &portal->dqrr;
18067 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
18068 + dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
18069 + qm_out(DQRR_CI_CINH, dqrr->ci);
18070 +}
18071 +
18072 +static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
18073 +{
18074 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
18075 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
18076 + qm_cl_invalidate(DQRR_CI);
18077 + qm_cl_touch_rw(DQRR_CI);
18078 +}
18079 +
18080 +static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
18081 +{
18082 + register struct qm_dqrr *dqrr = &portal->dqrr;
18083 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
18084 + dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
18085 + qm_cl_out(DQRR_CI, dqrr->ci);
18086 +}
18087 +
18088 +static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
18089 + int park)
18090 +{
18091 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
18092 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
18093 + DPA_ASSERT(idx < QM_DQRR_SIZE);
18094 + qm_out(DQRR_DCAP, (0 << 8) | /* S */
18095 + ((park ? 1 : 0) << 6) | /* PK */
18096 + idx); /* DCAP_CI */
18097 +}
18098 +
18099 +static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
18100 + const struct qm_dqrr_entry *dq,
18101 + int park)
18102 +{
18103 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
18104 + u8 idx = DQRR_PTR2IDX(dq);
18105 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
18106 + DPA_ASSERT((dqrr->ring + idx) == dq);
18107 + DPA_ASSERT(idx < QM_DQRR_SIZE);
18108 + qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
18109 + ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
18110 + idx); /* DQRR_DCAP::DCAP_CI */
18111 +}
18112 +
18113 +static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
18114 +{
18115 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
18116 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
18117 + return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
18118 +}
18119 +
18120 +static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
18121 +{
18122 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
18123 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
18124 + qm_cl_invalidate(DQRR_CI);
18125 + qm_cl_touch_ro(DQRR_CI);
18126 +}
18127 +
18128 +static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
18129 +{
18130 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
18131 + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
18132 + return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
18133 +}
18134 +
18135 +static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
18136 +{
18137 + register struct qm_dqrr *dqrr = &portal->dqrr;
18138 + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
18139 + return dqrr->ci;
18140 +}
18141 +
18142 +static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
18143 +{
18144 + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
18145 + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
18146 + qm_out(DQRR_DCAP, (0 << 8) | /* S */
18147 + (1 << 6) | /* PK */
18148 + (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
18149 +}
18150 +
18151 +static inline void qm_dqrr_park_current(struct qm_portal *portal)
18152 +{
18153 + register struct qm_dqrr *dqrr = &portal->dqrr;
18154 + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
18155 + qm_out(DQRR_DCAP, (0 << 8) | /* S */
18156 + (1 << 6) | /* PK */
18157 + DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
18158 +}
18159 +
18160 +static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
18161 +{
18162 + qm_out(DQRR_SDQCR, sdqcr);
18163 +}
18164 +
18165 +static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
18166 +{
18167 + return qm_in(DQRR_SDQCR);
18168 +}
18169 +
18170 +static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
18171 +{
18172 + qm_out(DQRR_VDQCR, vdqcr);
18173 +}
18174 +
18175 +static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
18176 +{
18177 + return qm_in(DQRR_VDQCR);
18178 +}
18179 +
18180 +static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr)
18181 +{
18182 + qm_out(DQRR_PDQCR, pdqcr);
18183 +}
18184 +
18185 +static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal)
18186 +{
18187 + return qm_in(DQRR_PDQCR);
18188 +}
18189 +
18190 +static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
18191 +{
18192 + register struct qm_dqrr *dqrr = &portal->dqrr;
18193 + return dqrr->ithresh;
18194 +}
18195 +
18196 +static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
18197 +{
18198 + qm_out(DQRR_ITR, ithresh);
18199 +}
18200 +
18201 +static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
18202 +{
18203 + return (qm_in(CFG) & 0x00f00000) >> 20;
18204 +}
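/*
 * Usage sketch (illustrative only, not a driver API): the DQRR polling
 * pattern used by qm_shutdown_fq() below - refresh the producer index,
 * walk any valid entries and consume them via DCA. Assumes the portal was
 * initialised with pmode == qm_dqrr_pvb and cmode == qm_dqrr_cdc; the
 * function name is an example.
 */
static inline void example_dqrr_drain(struct qm_portal *portal)
{
	const struct qm_dqrr_entry *dq;

	qm_dqrr_pvb_update(portal);
	dq = qm_dqrr_current(portal);
	while (dq) {
		/* a real consumer would inspect dq->fqid and dq->fd here */
		qm_dqrr_cdc_consume_1ptr(portal, dq, 0);
		qm_dqrr_pvb_update(portal);
		qm_dqrr_next(portal);
		dq = qm_dqrr_current(portal);
	}
}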
18205 +
18206 +
18207 +/* -------------- */
18208 +/* --- MR API --- */
18209 +
18210 +#define MR_CARRYCLEAR(p) \
18211 + (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
18212 +
18213 +static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
18214 +{
18215 + return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
18216 +}
18217 +
18218 +static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
18219 +{
18220 + return MR_CARRYCLEAR(e + 1);
18221 +}
18222 +
18223 +static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
18224 + enum qm_mr_cmode cmode)
18225 +{
18226 + register struct qm_mr *mr = &portal->mr;
18227 + u32 cfg;
18228 +
18229 + mr->ring = portal->addr.addr_ce + QM_CL_MR;
18230 + mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
18231 + mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
18232 + mr->cursor = mr->ring + mr->ci;
18233 + mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
18234 + mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
18235 + mr->ithresh = qm_in(MR_ITR);
18236 +#ifdef CONFIG_FSL_DPA_CHECKING
18237 + mr->pmode = pmode;
18238 + mr->cmode = cmode;
18239 +#endif
18240 + cfg = (qm_in(CFG) & 0xfffff0ff) |
18241 + ((cmode & 1) << 8); /* QCSP_CFG:MM */
18242 + qm_out(CFG, cfg);
18243 + return 0;
18244 +}
18245 +
18246 +static inline void qm_mr_finish(struct qm_portal *portal)
18247 +{
18248 + register struct qm_mr *mr = &portal->mr;
18249 + if (mr->ci != MR_PTR2IDX(mr->cursor))
18250 + pr_crit("Ignoring completed MR entries\n");
18251 +}
18252 +
18253 +static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
18254 +{
18255 + register struct qm_mr *mr = &portal->mr;
18256 + if (!mr->fill)
18257 + return NULL;
18258 + return mr->cursor;
18259 +}
18260 +
18261 +static inline u8 qm_mr_cursor(struct qm_portal *portal)
18262 +{
18263 + register struct qm_mr *mr = &portal->mr;
18264 + return MR_PTR2IDX(mr->cursor);
18265 +}
18266 +
18267 +static inline u8 qm_mr_next(struct qm_portal *portal)
18268 +{
18269 + register struct qm_mr *mr = &portal->mr;
18270 + DPA_ASSERT(mr->fill);
18271 + mr->cursor = MR_INC(mr->cursor);
18272 + return --mr->fill;
18273 +}
18274 +
18275 +static inline u8 qm_mr_pci_update(struct qm_portal *portal)
18276 +{
18277 + register struct qm_mr *mr = &portal->mr;
18278 + u8 diff, old_pi = mr->pi;
18279 + DPA_ASSERT(mr->pmode == qm_mr_pci);
18280 + mr->pi = qm_in(MR_PI_CINH);
18281 + diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
18282 + mr->fill += diff;
18283 + return diff;
18284 +}
18285 +
18286 +static inline void qm_mr_pce_prefetch(struct qm_portal *portal)
18287 +{
18288 + __maybe_unused register struct qm_mr *mr = &portal->mr;
18289 + DPA_ASSERT(mr->pmode == qm_mr_pce);
18290 + qm_cl_invalidate(MR_PI);
18291 + qm_cl_touch_ro(MR_PI);
18292 +}
18293 +
18294 +static inline u8 qm_mr_pce_update(struct qm_portal *portal)
18295 +{
18296 + register struct qm_mr *mr = &portal->mr;
18297 + u8 diff, old_pi = mr->pi;
18298 + DPA_ASSERT(mr->pmode == qm_mr_pce);
18299 + mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1);
18300 + diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
18301 + mr->fill += diff;
18302 + return diff;
18303 +}
18304 +
18305 +static inline void qm_mr_pvb_update(struct qm_portal *portal)
18306 +{
18307 + register struct qm_mr *mr = &portal->mr;
18308 + const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
18309 + DPA_ASSERT(mr->pmode == qm_mr_pvb);
18310 + /* when accessing 'verb', use __raw_readb() to ensure that compiler
18311 + * inlining doesn't try to optimise out "excess reads". */
18312 + if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
18313 + mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
18314 + if (!mr->pi)
18315 + mr->vbit ^= QM_MR_VERB_VBIT;
18316 + mr->fill++;
18317 + res = MR_INC(res);
18318 + }
18319 + dcbit_ro(res);
18320 +}
18321 +
18322 +static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
18323 +{
18324 + register struct qm_mr *mr = &portal->mr;
18325 + DPA_ASSERT(mr->cmode == qm_mr_cci);
18326 + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
18327 + qm_out(MR_CI_CINH, mr->ci);
18328 +}
18329 +
18330 +static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
18331 +{
18332 + register struct qm_mr *mr = &portal->mr;
18333 + DPA_ASSERT(mr->cmode == qm_mr_cci);
18334 + mr->ci = MR_PTR2IDX(mr->cursor);
18335 + qm_out(MR_CI_CINH, mr->ci);
18336 +}
18337 +
18338 +static inline void qm_mr_cce_prefetch(struct qm_portal *portal)
18339 +{
18340 + __maybe_unused register struct qm_mr *mr = &portal->mr;
18341 + DPA_ASSERT(mr->cmode == qm_mr_cce);
18342 + qm_cl_invalidate(MR_CI);
18343 + qm_cl_touch_rw(MR_CI);
18344 +}
18345 +
18346 +static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num)
18347 +{
18348 + register struct qm_mr *mr = &portal->mr;
18349 + DPA_ASSERT(mr->cmode == qm_mr_cce);
18350 + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
18351 + qm_cl_out(MR_CI, mr->ci);
18352 +}
18353 +
18354 +static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal)
18355 +{
18356 + register struct qm_mr *mr = &portal->mr;
18357 + DPA_ASSERT(mr->cmode == qm_mr_cce);
18358 + mr->ci = MR_PTR2IDX(mr->cursor);
18359 + qm_cl_out(MR_CI, mr->ci);
18360 +}
18361 +
18362 +static inline u8 qm_mr_get_ci(struct qm_portal *portal)
18363 +{
18364 + register struct qm_mr *mr = &portal->mr;
18365 + return mr->ci;
18366 +}
18367 +
18368 +static inline u8 qm_mr_get_ithresh(struct qm_portal *portal)
18369 +{
18370 + register struct qm_mr *mr = &portal->mr;
18371 + return mr->ithresh;
18372 +}
18373 +
18374 +static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
18375 +{
18376 + qm_out(MR_ITR, ithresh);
18377 +}
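/*
 * Usage sketch (illustrative only, not a driver API): polling the message
 * ring for FQ retirement notifications, the same pattern qm_shutdown_fq()
 * uses below. Assumes pmode == qm_mr_pvb and cmode == qm_mr_cci; the
 * function name is an example.
 */
static inline int example_mr_poll_fqrn(struct qm_portal *portal)
{
	const struct qm_mr_entry *msg;
	int found_fqrn = 0;

	qm_mr_pvb_update(portal);
	msg = qm_mr_current(portal);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == QM_MR_VERB_FQRN)
			found_fqrn = 1;
		qm_mr_next(portal);
		qm_mr_cci_consume_to_current(portal);
		qm_mr_pvb_update(portal);
		msg = qm_mr_current(portal);
	}
	return found_fqrn;
}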
18378 +
18379 +
18380 +/* ------------------------------ */
18381 +/* --- Management command API --- */
18382 +
18383 +static inline int qm_mc_init(struct qm_portal *portal)
18384 +{
18385 + register struct qm_mc *mc = &portal->mc;
18386 + mc->cr = portal->addr.addr_ce + QM_CL_CR;
18387 + mc->rr = portal->addr.addr_ce + QM_CL_RR0;
18388 + mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
18389 + QM_MCC_VERB_VBIT) ? 0 : 1;
18390 + mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
18391 +#ifdef CONFIG_FSL_DPA_CHECKING
18392 + mc->state = qman_mc_idle;
18393 +#endif
18394 + return 0;
18395 +}
18396 +
18397 +static inline void qm_mc_finish(struct qm_portal *portal)
18398 +{
18399 + __maybe_unused register struct qm_mc *mc = &portal->mc;
18400 + DPA_ASSERT(mc->state == qman_mc_idle);
18401 +#ifdef CONFIG_FSL_DPA_CHECKING
18402 + if (mc->state != qman_mc_idle)
18403 + pr_crit("Losing incomplete MC command\n");
18404 +#endif
18405 +}
18406 +
18407 +static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
18408 +{
18409 + register struct qm_mc *mc = &portal->mc;
18410 + DPA_ASSERT(mc->state == qman_mc_idle);
18411 +#ifdef CONFIG_FSL_DPA_CHECKING
18412 + mc->state = qman_mc_user;
18413 +#endif
18414 +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
18415 + dcbz_64(mc->cr);
18416 +#endif
18417 + return mc->cr;
18418 +}
18419 +
18420 +static inline void qm_mc_abort(struct qm_portal *portal)
18421 +{
18422 + __maybe_unused register struct qm_mc *mc = &portal->mc;
18423 + DPA_ASSERT(mc->state == qman_mc_user);
18424 +#ifdef CONFIG_FSL_DPA_CHECKING
18425 + mc->state = qman_mc_idle;
18426 +#endif
18427 +}
18428 +
18429 +static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
18430 +{
18431 + register struct qm_mc *mc = &portal->mc;
18432 + struct qm_mc_result *rr = mc->rr + mc->rridx;
18433 + DPA_ASSERT(mc->state == qman_mc_user);
18434 + lwsync();
18435 + mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
18436 + dcbf(mc->cr);
18437 + dcbit_ro(rr);
18438 +#ifdef CONFIG_FSL_DPA_CHECKING
18439 + mc->state = qman_mc_hw;
18440 +#endif
18441 +}
18442 +
18443 +static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
18444 +{
18445 + register struct qm_mc *mc = &portal->mc;
18446 + struct qm_mc_result *rr = mc->rr + mc->rridx;
18447 + DPA_ASSERT(mc->state == qman_mc_hw);
18448 + /* The inactive response register's verb byte always returns zero until
18449 + * its command is submitted and completed. This includes the valid-bit,
18450 + * in case you were wondering... */
18451 + if (!__raw_readb(&rr->verb)) {
18452 + dcbit_ro(rr);
18453 + return NULL;
18454 + }
18455 + mc->rridx ^= 1;
18456 + mc->vbit ^= QM_MCC_VERB_VBIT;
18457 +#ifdef CONFIG_FSL_DPA_CHECKING
18458 + mc->state = qman_mc_idle;
18459 +#endif
18460 + return rr;
18461 +}
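/*
 * Usage sketch (illustrative only, not a driver API): the management
 * command round trip used throughout this file - start, fill in the
 * command, commit with a verb, then poll for the result. This mirrors the
 * QUERYFQ_NP sequence in qm_shutdown_fq() below; the function name is an
 * example.
 */
static inline u8 example_query_fq_np_state(struct qm_portal *portal, u32 fqid)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;

	mcc = qm_mc_start(portal);
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(portal)))
		cpu_relax();
	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	return mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
}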
18462 +
18463 +
18464 +/* ------------------------------------- */
18465 +/* --- Portal interrupt register API --- */
18466 +
18467 +static inline int qm_isr_init(__always_unused struct qm_portal *portal)
18468 +{
18469 + return 0;
18470 +}
18471 +
18472 +static inline void qm_isr_finish(__always_unused struct qm_portal *portal)
18473 +{
18474 +}
18475 +
18476 +static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
18477 +{
18478 + qm_out(ITPR, iperiod);
18479 +}
18480 +
18481 +static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
18482 +{
18483 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
18484 + return __qm_in(&portal->addr, QM_REG_ISR + (n << 6));
18485 +#else
18486 + return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
18487 +#endif
18488 +}
18489 +
18490 +static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
18491 + u32 val)
18492 +{
18493 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
18494 + __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val);
18495 +#else
18496 + __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
18497 +#endif
18498 +}
18499 +
18500 +/* Cleanup FQs */
18501 +static inline int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
18502 + u32 fqid)
18503 +{
18504 +
18505 + struct qm_mc_command *mcc;
18506 + struct qm_mc_result *mcr;
18507 + u8 state;
18508 + int orl_empty, fq_empty, i, drain = 0;
18509 + u32 result;
18510 + u32 channel, wq;
18511 + u16 dest_wq;
18512 +
18513 + /* Determine the state of the FQID */
18514 + mcc = qm_mc_start(portal[0]);
18515 + mcc->queryfq_np.fqid = cpu_to_be32(fqid);
18516 + qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
18517 + while (!(mcr = qm_mc_result(portal[0])))
18518 + cpu_relax();
18519 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
18520 + state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
18521 + if (state == QM_MCR_NP_STATE_OOS)
18522 +		return 0; /* Already OOS, no need to do any more checks */
18523 +
18524 + /* Query which channel the FQ is using */
18525 + mcc = qm_mc_start(portal[0]);
18526 + mcc->queryfq.fqid = cpu_to_be32(fqid);
18527 + qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
18528 + while (!(mcr = qm_mc_result(portal[0])))
18529 + cpu_relax();
18530 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
18531 +
18532 + /* Need to store these since the MCR gets reused */
18533 + dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
18534 + wq = dest_wq & 0x7;
18535 + channel = dest_wq>>3;
18536 +
18537 + switch (state) {
18538 + case QM_MCR_NP_STATE_TEN_SCHED:
18539 + case QM_MCR_NP_STATE_TRU_SCHED:
18540 + case QM_MCR_NP_STATE_ACTIVE:
18541 + case QM_MCR_NP_STATE_PARKED:
18542 + orl_empty = 0;
18543 + mcc = qm_mc_start(portal[0]);
18544 + mcc->alterfq.fqid = cpu_to_be32(fqid);
18545 + qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
18546 + while (!(mcr = qm_mc_result(portal[0])))
18547 + cpu_relax();
18548 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
18549 + QM_MCR_VERB_ALTER_RETIRE);
18550 + result = mcr->result; /* Make a copy as we reuse MCR below */
18551 +
18552 + if (result == QM_MCR_RESULT_PENDING) {
18553 +			/* Need to wait for the FQRN in the message ring, which
18554 +			   will only occur once the FQ has been drained. In
18555 +			   order for the FQ to drain, the portal needs to be set
18556 +			   to dequeue from the channel the FQ is scheduled on. */
18557 + const struct qm_mr_entry *msg;
18558 + const struct qm_dqrr_entry *dqrr = NULL;
18559 + int found_fqrn = 0;
18560 + u16 dequeue_wq = 0;
18561 +
18562 + /* Flag that we need to drain FQ */
18563 + drain = 1;
18564 +
18565 + if (channel >= qm_channel_pool1 &&
18566 + channel < (qm_channel_pool1 + 15)) {
18567 + /* Pool channel, enable the bit in the portal */
18568 + dequeue_wq = (channel -
18569 + qm_channel_pool1 + 1)<<4 | wq;
18570 + } else if (channel < qm_channel_pool1) {
18571 + /* Dedicated channel */
18572 + dequeue_wq = wq;
18573 + } else {
18574 +				pr_info("Cannot recover FQ 0x%x, it is scheduled on channel 0x%x\n",
18575 +					fqid,
18576 +					channel);
18577 + return -EBUSY;
18578 + }
18579 + /* Set the sdqcr to drain this channel */
18580 + if (channel < qm_channel_pool1)
18581 + for (i = 0; i < portal_count; i++)
18582 + qm_dqrr_sdqcr_set(portal[i],
18583 + QM_SDQCR_TYPE_ACTIVE |
18584 + QM_SDQCR_CHANNELS_DEDICATED);
18585 + else
18586 + for (i = 0; i < portal_count; i++)
18587 + qm_dqrr_sdqcr_set(
18588 + portal[i],
18589 + QM_SDQCR_TYPE_ACTIVE |
18590 + QM_SDQCR_CHANNELS_POOL_CONV
18591 + (channel));
18592 + while (!found_fqrn) {
18593 + /* Keep draining DQRR while checking the MR*/
18594 + for (i = 0; i < portal_count; i++) {
18595 + qm_dqrr_pvb_update(portal[i]);
18596 + dqrr = qm_dqrr_current(portal[i]);
18597 + while (dqrr) {
18598 + qm_dqrr_cdc_consume_1ptr(
18599 + portal[i], dqrr, 0);
18600 + qm_dqrr_pvb_update(portal[i]);
18601 + qm_dqrr_next(portal[i]);
18602 + dqrr = qm_dqrr_current(
18603 + portal[i]);
18604 + }
18605 + /* Process message ring too */
18606 + qm_mr_pvb_update(portal[i]);
18607 + msg = qm_mr_current(portal[i]);
18608 + while (msg) {
18609 + if ((msg->verb &
18610 + QM_MR_VERB_TYPE_MASK)
18611 + == QM_MR_VERB_FQRN)
18612 + found_fqrn = 1;
18613 + qm_mr_next(portal[i]);
18614 + qm_mr_cci_consume_to_current(
18615 + portal[i]);
18616 + qm_mr_pvb_update(portal[i]);
18617 + msg = qm_mr_current(portal[i]);
18618 + }
18619 + cpu_relax();
18620 + }
18621 + }
18622 + }
18623 + if (result != QM_MCR_RESULT_OK &&
18624 + result != QM_MCR_RESULT_PENDING) {
18625 + /* error */
18626 + pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n",
18627 + fqid, result);
18628 + return -1;
18629 + }
18630 + if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
18631 + /* ORL had no entries, no need to wait until the
18632 + ERNs come in */
18633 + orl_empty = 1;
18634 + }
18635 + /* Retirement succeeded, check to see if FQ needs
18636 + to be drained */
18637 + if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
18638 + /* FQ is Not Empty, drain using volatile DQ commands */
18639 + fq_empty = 0;
18640 + do {
18641 + const struct qm_dqrr_entry *dqrr = NULL;
18642 + u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
18643 + qm_dqrr_vdqcr_set(portal[0], vdqcr);
18644 +
18645 + /* Wait for a dequeue to occur */
18646 + while (dqrr == NULL) {
18647 + qm_dqrr_pvb_update(portal[0]);
18648 + dqrr = qm_dqrr_current(portal[0]);
18649 + if (!dqrr)
18650 + cpu_relax();
18651 + }
18652 + /* Process the dequeues, making sure to
18653 + empty the ring completely */
18654 + while (dqrr) {
18655 + if (be32_to_cpu(dqrr->fqid) == fqid &&
18656 + dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
18657 + fq_empty = 1;
18658 + qm_dqrr_cdc_consume_1ptr(portal[0],
18659 + dqrr, 0);
18660 + qm_dqrr_pvb_update(portal[0]);
18661 + qm_dqrr_next(portal[0]);
18662 + dqrr = qm_dqrr_current(portal[0]);
18663 + }
18664 + } while (fq_empty == 0);
18665 + }
18666 + for (i = 0; i < portal_count; i++)
18667 + qm_dqrr_sdqcr_set(portal[i], 0);
18668 +
18669 + /* Wait for the ORL to have been completely drained */
18670 + while (orl_empty == 0) {
18671 + const struct qm_mr_entry *msg;
18672 + qm_mr_pvb_update(portal[0]);
18673 + msg = qm_mr_current(portal[0]);
18674 + while (msg) {
18675 + if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
18676 + QM_MR_VERB_FQRL)
18677 + orl_empty = 1;
18678 + qm_mr_next(portal[0]);
18679 + qm_mr_cci_consume_to_current(portal[0]);
18680 + qm_mr_pvb_update(portal[0]);
18681 + msg = qm_mr_current(portal[0]);
18682 + }
18683 + cpu_relax();
18684 + }
18685 + mcc = qm_mc_start(portal[0]);
18686 + mcc->alterfq.fqid = cpu_to_be32(fqid);
18687 + qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
18688 + while (!(mcr = qm_mc_result(portal[0])))
18689 + cpu_relax();
18690 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
18691 + QM_MCR_VERB_ALTER_OOS);
18692 + if (mcr->result != QM_MCR_RESULT_OK) {
18693 +			pr_err("OOS after drain failed on FQID 0x%x, result 0x%x\n",
18694 + fqid, mcr->result);
18695 + return -1;
18696 + }
18697 + return 0;
18698 + case QM_MCR_NP_STATE_RETIRED:
18699 + /* Send OOS Command */
18700 + mcc = qm_mc_start(portal[0]);
18701 + mcc->alterfq.fqid = cpu_to_be32(fqid);
18702 + qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
18703 + while (!(mcr = qm_mc_result(portal[0])))
18704 + cpu_relax();
18705 + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
18706 + QM_MCR_VERB_ALTER_OOS);
18707 + if (mcr->result) {
18708 +			pr_err("OOS failed on FQID 0x%x\n", fqid);
18709 + return -1;
18710 + }
18711 + return 0;
18712 + }
18713 + return -1;
18714 +}
18715 --- /dev/null
18716 +++ b/drivers/staging/fsl_qbman/qman_private.h
18717 @@ -0,0 +1,398 @@
18718 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
18719 + *
18720 + * Redistribution and use in source and binary forms, with or without
18721 + * modification, are permitted provided that the following conditions are met:
18722 + * * Redistributions of source code must retain the above copyright
18723 + * notice, this list of conditions and the following disclaimer.
18724 + * * Redistributions in binary form must reproduce the above copyright
18725 + * notice, this list of conditions and the following disclaimer in the
18726 + * documentation and/or other materials provided with the distribution.
18727 + * * Neither the name of Freescale Semiconductor nor the
18728 + * names of its contributors may be used to endorse or promote products
18729 + * derived from this software without specific prior written permission.
18730 + *
18731 + *
18732 + * ALTERNATIVELY, this software may be distributed under the terms of the
18733 + * GNU General Public License ("GPL") as published by the Free Software
18734 + * Foundation, either version 2 of that License or (at your option) any
18735 + * later version.
18736 + *
18737 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18738 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18739 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18740 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
18741 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18742 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
18743 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
18744 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
18745 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
18746 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18747 + */
18748 +
18749 +#include "dpa_sys.h"
18750 +#include <linux/fsl_qman.h>
18751 +#include <linux/iommu.h>
18752 +
18753 +#if defined(CONFIG_FSL_PAMU)
18754 +#include <asm/fsl_pamu_stash.h>
18755 +#endif
18756 +
18757 +#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64)
18758 +#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP"
18759 +#endif
18760 +
18761 +#define QBMAN_ANY_PORTAL_IDX 0xffffffff
18762 + /* ----------------- */
18763 + /* Congestion Groups */
18764 + /* ----------------- */
18765 +/* This wrapper represents a bit-array for the state of the 256 Qman congestion
18766 + * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
18767 + * those that don't concern us. We harness the structure and accessor details
18768 + * already used in the management command to query congestion groups. */
18769 +struct qman_cgrs {
18770 + struct __qm_mcr_querycongestion q;
18771 +};
18772 +static inline void qman_cgrs_init(struct qman_cgrs *c)
18773 +{
18774 + memset(c, 0, sizeof(*c));
18775 +}
18776 +static inline void qman_cgrs_fill(struct qman_cgrs *c)
18777 +{
18778 + memset(c, 0xff, sizeof(*c));
18779 +}
18780 +static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
18781 +{
18782 + return QM_MCR_QUERYCONGESTION(&c->q, num);
18783 +}
18784 +static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
18785 +{
18786 + c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
18787 +}
18788 +static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
18789 +{
18790 + c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
18791 +}
18792 +static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
18793 +{
18794 + while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
18795 + ;
18796 + return num;
18797 +}
18798 +static inline void qman_cgrs_cp(struct qman_cgrs *dest,
18799 + const struct qman_cgrs *src)
18800 +{
18801 + *dest = *src;
18802 +}
18803 +static inline void qman_cgrs_and(struct qman_cgrs *dest,
18804 + const struct qman_cgrs *a, const struct qman_cgrs *b)
18805 +{
18806 + int ret;
18807 + u32 *_d = dest->q.__state;
18808 + const u32 *_a = a->q.__state;
18809 + const u32 *_b = b->q.__state;
18810 + for (ret = 0; ret < 8; ret++)
18811 + *(_d++) = *(_a++) & *(_b++);
18812 +}
18813 +static inline void qman_cgrs_xor(struct qman_cgrs *dest,
18814 + const struct qman_cgrs *a, const struct qman_cgrs *b)
18815 +{
18816 + int ret;
18817 + u32 *_d = dest->q.__state;
18818 + const u32 *_a = a->q.__state;
18819 + const u32 *_b = b->q.__state;
18820 + for (ret = 0; ret < 8; ret++)
18821 + *(_d++) = *(_a++) ^ *(_b++);
18822 +}
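/*
 * Usage sketch (illustrative only, not a driver API): walking the
 * congestion groups that are set in a qman_cgrs bit-array, e.g. after
 * masking a queried state against a portal's CGR mask with
 * qman_cgrs_and(). The function name is an example.
 */
static inline void example_walk_cgrs(struct qman_cgrs *c)
{
	int i;

	for (i = qman_cgrs_next(c, -1); i < __CGR_NUM;
	     i = qman_cgrs_next(c, i))
		pr_info("congestion group %d is set\n", i);
}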
18823 +
18824 + /* ----------------------- */
18825 + /* CEETM Congestion Groups */
18826 + /* ----------------------- */
18827 +/* This wrapper represents a bit-array for the state of the 512 Qman CEETM
18828 + * congestion groups.
18829 + */
18830 +struct qman_ccgrs {
18831 + struct __qm_mcr_querycongestion q[2];
18832 +};
18833 +static inline void qman_ccgrs_init(struct qman_ccgrs *c)
18834 +{
18835 + memset(c, 0, sizeof(*c));
18836 +}
18837 +static inline void qman_ccgrs_fill(struct qman_ccgrs *c)
18838 +{
18839 + memset(c, 0xff, sizeof(*c));
18840 +}
18841 +static inline int qman_ccgrs_get(struct qman_ccgrs *c, int num)
18842 +{
18843 + if (num < __CGR_NUM)
18844 + return QM_MCR_QUERYCONGESTION(&c->q[0], num);
18845 + else
18846 + return QM_MCR_QUERYCONGESTION(&c->q[1], (num - __CGR_NUM));
18847 +}
18848 +static inline int qman_ccgrs_next(struct qman_ccgrs *c, int num)
18849 +{
18850 + while ((++num < __CGR_NUM) && !qman_ccgrs_get(c, num))
18851 + ;
18852 + return num;
18853 +}
18854 +static inline void qman_ccgrs_cp(struct qman_ccgrs *dest,
18855 + const struct qman_ccgrs *src)
18856 +{
18857 + *dest = *src;
18858 +}
18859 +static inline void qman_ccgrs_and(struct qman_ccgrs *dest,
18860 + const struct qman_ccgrs *a, const struct qman_ccgrs *b)
18861 +{
18862 + int ret, i;
18863 + u32 *_d;
18864 + const u32 *_a, *_b;
18865 + for (i = 0; i < 2; i++) {
18866 + _d = dest->q[i].__state;
18867 + _a = a->q[i].__state;
18868 + _b = b->q[i].__state;
18869 + for (ret = 0; ret < 8; ret++)
18870 + *(_d++) = *(_a++) & *(_b++);
18871 + }
18872 +}
18873 +static inline void qman_ccgrs_xor(struct qman_ccgrs *dest,
18874 + const struct qman_ccgrs *a, const struct qman_ccgrs *b)
18875 +{
18876 + int ret, i;
18877 + u32 *_d;
18878 + const u32 *_a, *_b;
18879 + for (i = 0; i < 2; i++) {
18880 + _d = dest->q[i].__state;
18881 + _a = a->q[i].__state;
18882 + _b = b->q[i].__state;
18883 + for (ret = 0; ret < 8; ret++)
18884 + *(_d++) = *(_a++) ^ *(_b++);
18885 + }
18886 +}
18887 +
18888 +/* used by CCSR and portal interrupt code */
18889 +enum qm_isr_reg {
18890 + qm_isr_status = 0,
18891 + qm_isr_enable = 1,
18892 + qm_isr_disable = 2,
18893 + qm_isr_inhibit = 3
18894 +};
18895 +
18896 +struct qm_portal_config {
18897 + /* Corenet portal addresses;
18898 + * [0]==cache-enabled, [1]==cache-inhibited. */
18899 + __iomem void *addr_virt[2];
18900 + struct resource addr_phys[2];
18901 + struct device dev;
18902 + struct iommu_domain *iommu_domain;
18903 + /* Allow these to be joined in lists */
18904 + struct list_head list;
18905 + /* User-visible portal configuration settings */
18906 + struct qman_portal_config public_cfg;
18907 + /* power management saved data */
18908 + u32 saved_isdr;
18909 +};
18910 +
18911 +/* Revision info (for errata and feature handling) */
18912 +#define QMAN_REV11 0x0101
18913 +#define QMAN_REV12 0x0102
18914 +#define QMAN_REV20 0x0200
18915 +#define QMAN_REV30 0x0300
18916 +#define QMAN_REV31 0x0301
18917 +#define QMAN_REV32 0x0302
18918 +
18919 +/* QMan REV_2 register contains the Cfg option */
18920 +#define QMAN_REV_CFG_0 0x0
18921 +#define QMAN_REV_CFG_1 0x1
18922 +#define QMAN_REV_CFG_2 0x2
18923 +#define QMAN_REV_CFG_3 0x3
18924 +
18925 +extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
18926 +extern u8 qman_ip_cfg;
18927 +extern u32 qman_clk;
18928 +extern u16 qman_portal_max;
18929 +
18930 +#ifdef CONFIG_FSL_QMAN_CONFIG
18931 +/* Hooks from qman_driver.c to qman_config.c */
18932 +int qman_init_ccsr(struct device_node *node);
18933 +void qman_liodn_fixup(u16 channel);
18934 +int qman_set_sdest(u16 channel, unsigned int cpu_idx);
18935 +size_t get_qman_fqd_size(void);
18936 +#else
18937 +static inline size_t get_qman_fqd_size(void)
18938 +{
18939 + return (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ);
18940 +}
18941 +#endif
18942 +
18943 +int qm_set_wpm(int wpm);
18944 +int qm_get_wpm(int *wpm);
18945 +
18946 +/* Hooks from qman_driver.c in to qman_high.c */
18947 +struct qman_portal *qman_create_portal(
18948 + struct qman_portal *portal,
18949 + const struct qm_portal_config *config,
18950 + const struct qman_cgrs *cgrs);
18951 +
18952 +struct qman_portal *qman_create_affine_portal(
18953 + const struct qm_portal_config *config,
18954 + const struct qman_cgrs *cgrs);
18955 +struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
18956 + int cpu);
18957 +const struct qm_portal_config *qman_destroy_affine_portal(void);
18958 +void qman_destroy_portal(struct qman_portal *qm);
18959 +
18960 +/* Hooks from fsl_usdpaa.c to qman_driver.c */
18961 +struct qm_portal_config *qm_get_unused_portal(void);
18962 +struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
18963 +
18964 +void qm_put_unused_portal(struct qm_portal_config *pcfg);
18965 +void qm_set_liodns(struct qm_portal_config *pcfg);
18966 +
18967 +/* This CGR feature is supported by h/w and required by unit-tests and the
18968 + * debugfs hooks, so is implemented in the driver. However it allows an explicit
18969 + * corruption of h/w fields by s/w that are usually incorruptible (because the
18970 + * counters are usually maintained entirely within h/w). As such, we declare
18971 + * this API internally. */
18972 +int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
18973 + struct qm_mcr_cgrtestwrite *result);
18974 +
18975 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
18976 +/* If the fq object pointer is greater than the size of the context_b field,
18977 + * then a lookup table is required. */
18978 +int qman_setup_fq_lookup_table(size_t num_entries);
18979 +#endif
18980 +
18981 +
18982 +/*************************************************/
18983 +/* QMan s/w corenet portal, low-level i/face */
18984 +/*************************************************/
18985 +
18986 +/* Note: most functions are only used by the high-level interface, so are
18987 + * inlined from qman_low.h. The stuff below is for use by other parts of the
18988 + * driver. */
18989 +
18990 +/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
18991 + * dequeue TYPE. Choose TOKEN (8-bit).
18992 + * If SOURCE == CHANNELS,
18993 + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
18994 + * You can choose DEDICATED_PRECEDENCE if the portal channel should have
18995 + * priority.
18996 + * If SOURCE == SPECIFICWQ,
18997 + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
18998 + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
18999 + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
19000 + * same value.
19001 + */
19002 +#define QM_SDQCR_SOURCE_CHANNELS 0x0
19003 +#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
19004 +#define QM_SDQCR_COUNT_EXACT1 0x0
19005 +#define QM_SDQCR_COUNT_UPTO3 0x20000000
19006 +#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
19007 +#define QM_SDQCR_TYPE_MASK 0x03000000
19008 +#define QM_SDQCR_TYPE_NULL 0x0
19009 +#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
19010 +#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
19011 +#define QM_SDQCR_TYPE_ACTIVE 0x03000000
19012 +#define QM_SDQCR_TOKEN_MASK 0x00ff0000
19013 +#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
19014 +#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
19015 +#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
19016 +#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
19017 +#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
19018 +#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
19019 +#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
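/*
 * Usage sketch (illustrative only): composing an SDQCR value from the
 * fields described above - dequeue up to three "active" frames at a time
 * from the dedicated channel plus pool channel 2, with token 0x5a. The
 * result would be programmed with qm_dqrr_sdqcr_set() from qman_low.h.
 * The channel, token and function name are example values only;
 * QM_SDQCR_CHANNELS_POOL() comes from linux/fsl_qman.h.
 */
static inline u32 example_sdqcr_value(void)
{
	return QM_SDQCR_SOURCE_CHANNELS |
	       QM_SDQCR_COUNT_UPTO3 |
	       QM_SDQCR_TYPE_ACTIVE |
	       QM_SDQCR_TOKEN_SET(0x5a) |
	       QM_SDQCR_CHANNELS_DEDICATED |
	       QM_SDQCR_CHANNELS_POOL(2);
}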
19020 +
19021 +/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
19022 +#define QM_VDQCR_FQID_MASK 0x00ffffff
19023 +#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
19024 +
19025 +/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
19026 + * If MODE==SCHEDULED
19027 + * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
19028 + * If CHANNELS,
19029 + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
19030 + * You can choose DEDICATED_PRECEDENCE if the portal channel should have
19031 + * priority.
19032 + * If SPECIFICWQ,
19033 + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
19034 + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
19035 + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
19036 + * same value.
19037 + * If MODE==UNSCHEDULED
19038 + * Choose FQID().
19039 + */
19040 +#define QM_PDQCR_MODE_SCHEDULED 0x0
19041 +#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
19042 +#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
19043 +#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
19044 +#define QM_PDQCR_COUNT_EXACT1 0x0
19045 +#define QM_PDQCR_COUNT_UPTO3 0x20000000
19046 +#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
19047 +#define QM_PDQCR_TYPE_MASK 0x03000000
19048 +#define QM_PDQCR_TYPE_NULL 0x0
19049 +#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
19050 +#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
19051 +#define QM_PDQCR_TYPE_ACTIVE 0x03000000
19052 +#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
19053 +#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
19054 +#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
19055 +#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
19056 +#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
19057 +#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
19058 +#define QM_PDQCR_FQID(n) ((n) & 0xffffff)
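/*
 * Usage sketch (illustrative only): an unscheduled (direct) dequeue of a
 * single frame from one specific FQ, built from the PDQCR fields described
 * above; the resulting value would be programmed with qm_dqrr_pdqcr_set()
 * from qman_low.h. The function name is an example.
 */
static inline u32 example_pdqcr_value(u32 fqid)
{
	return QM_PDQCR_MODE_UNSCHEDULED |
	       QM_PDQCR_COUNT_EXACT1 |
	       QM_PDQCR_FQID(fqid);
}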
19059 +
19060 +/* Used by all portal interrupt registers except 'inhibit':
19061 + * channels with frame availability.
19062 + */
19063 +#define QM_PIRQ_DQAVAIL 0x0000ffff
19064 +
19065 +/* The DQAVAIL interrupt fields break down into these bits; */
19066 +#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
19067 +#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
19068 +#define QM_DQAVAIL_MASK 0xffff
19069 +/* This mask contains all the "irqsource" bits visible to API users */
19070 +#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
19071 +
19072 +/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
19073 + * the disable register" rather than "disable the ability to write". */
19074 +#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
19075 +#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
19076 +#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
19077 +#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
19078 +#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
19079 +#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
19080 +/* TODO: unfortunate name-clash here, reword? */
19081 +#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
19082 +#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
19083 +
19084 +#ifdef CONFIG_FSL_QMAN_CONFIG
19085 +int qman_have_ccsr(void);
19086 +#else
19087 +#define qman_have_ccsr 0
19088 +#endif
19089 +
19090 +__init int qman_init(void);
19091 +__init int qman_resource_init(void);
19092 +
19093 +/* CEETM related */
19094 +#define QMAN_CEETM_MAX 2
19095 +extern u8 num_ceetms;
19096 +extern struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
19097 +int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
19098 +int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
19099 +int qman_ceetm_set_prescaler(enum qm_dc_portal portal);
19100 +int qman_ceetm_get_prescaler(u16 *pres);
19101 +int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
19102 + struct qm_mcr_ceetm_cq_query *cq_query);
19103 +int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
19104 + struct qm_mcr_ceetm_ccgr_query *response);
19105 +int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num);
19106 +
19107 +extern void *affine_portals[NR_CPUS];
19108 +const struct qm_portal_config *qman_get_qm_portal_config(
19109 + struct qman_portal *portal);
19110 +
19111 +/* power management */
19112 +#ifdef CONFIG_SUSPEND
19113 +void suspend_unused_qportal(void);
19114 +void resume_unused_qportal(void);
19115 +#endif
19116 --- /dev/null
19117 +++ b/drivers/staging/fsl_qbman/qman_test.c
19118 @@ -0,0 +1,57 @@
19119 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
19120 + *
19121 + * Redistribution and use in source and binary forms, with or without
19122 + * modification, are permitted provided that the following conditions are met:
19123 + * * Redistributions of source code must retain the above copyright
19124 + * notice, this list of conditions and the following disclaimer.
19125 + * * Redistributions in binary form must reproduce the above copyright
19126 + * notice, this list of conditions and the following disclaimer in the
19127 + * documentation and/or other materials provided with the distribution.
19128 + * * Neither the name of Freescale Semiconductor nor the
19129 + * names of its contributors may be used to endorse or promote products
19130 + * derived from this software without specific prior written permission.
19131 + *
19132 + *
19133 + * ALTERNATIVELY, this software may be distributed under the terms of the
19134 + * GNU General Public License ("GPL") as published by the Free Software
19135 + * Foundation, either version 2 of that License or (at your option) any
19136 + * later version.
19137 + *
19138 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
19139 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19140 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19141 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
19142 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19143 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19144 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
19145 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19146 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
19147 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19148 + */
19149 +
19150 +#include "qman_test.h"
19151 +
19152 +MODULE_AUTHOR("Geoff Thorpe");
19153 +MODULE_LICENSE("Dual BSD/GPL");
19154 +MODULE_DESCRIPTION("Qman testing");
19155 +
19156 +static int test_init(void)
19157 +{
19158 + int loop = 1;
19159 + while (loop--) {
19160 +#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO
19161 + qman_test_hotpotato();
19162 +#endif
19163 +#ifdef CONFIG_FSL_QMAN_TEST_HIGH
19164 + qman_test_high();
19165 +#endif
19166 + }
19167 + return 0;
19168 +}
19169 +
19170 +static void test_exit(void)
19171 +{
19172 +}
19173 +
19174 +module_init(test_init);
19175 +module_exit(test_exit);
19176 --- /dev/null
19177 +++ b/drivers/staging/fsl_qbman/qman_test.h
19178 @@ -0,0 +1,45 @@
19179 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
19180 + *
19181 + * Redistribution and use in source and binary forms, with or without
19182 + * modification, are permitted provided that the following conditions are met:
19183 + * * Redistributions of source code must retain the above copyright
19184 + * notice, this list of conditions and the following disclaimer.
19185 + * * Redistributions in binary form must reproduce the above copyright
19186 + * notice, this list of conditions and the following disclaimer in the
19187 + * documentation and/or other materials provided with the distribution.
19188 + * * Neither the name of Freescale Semiconductor nor the
19189 + * names of its contributors may be used to endorse or promote products
19190 + * derived from this software without specific prior written permission.
19191 + *
19192 + *
19193 + * ALTERNATIVELY, this software may be distributed under the terms of the
19194 + * GNU General Public License ("GPL") as published by the Free Software
19195 + * Foundation, either version 2 of that License or (at your option) any
19196 + * later version.
19197 + *
19198 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
19199 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19200 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19201 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
19202 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19203 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19204 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
19205 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19206 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
19207 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19208 + */
19209 +
19210 +#include <linux/kernel.h>
19211 +#include <linux/errno.h>
19212 +#include <linux/io.h>
19213 +#include <linux/slab.h>
19214 +#include <linux/module.h>
19215 +#include <linux/interrupt.h>
19216 +#include <linux/delay.h>
19217 +#include <linux/sched.h>
19218 +
19219 +#include <linux/fsl_qman.h>
19220 +
19221 +void qman_test_hotpotato(void);
19222 +void qman_test_high(void);
19223 +
19224 --- /dev/null
19225 +++ b/drivers/staging/fsl_qbman/qman_test_high.c
19226 @@ -0,0 +1,216 @@
19227 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
19228 + *
19229 + * Redistribution and use in source and binary forms, with or without
19230 + * modification, are permitted provided that the following conditions are met:
19231 + * * Redistributions of source code must retain the above copyright
19232 + * notice, this list of conditions and the following disclaimer.
19233 + * * Redistributions in binary form must reproduce the above copyright
19234 + * notice, this list of conditions and the following disclaimer in the
19235 + * documentation and/or other materials provided with the distribution.
19236 + * * Neither the name of Freescale Semiconductor nor the
19237 + * names of its contributors may be used to endorse or promote products
19238 + * derived from this software without specific prior written permission.
19239 + *
19240 + *
19241 + * ALTERNATIVELY, this software may be distributed under the terms of the
19242 + * GNU General Public License ("GPL") as published by the Free Software
19243 + * Foundation, either version 2 of that License or (at your option) any
19244 + * later version.
19245 + *
19246 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
19247 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19248 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19249 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
19250 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19251 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19252 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
19253 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19254 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
19255 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19256 + */
19257 +
19258 +#include "qman_test.h"
19259 +
19260 +/*************/
19261 +/* constants */
19262 +/*************/
19263 +
19264 +#define CGR_ID 27
19265 +#define POOL_ID 2
19266 +#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
19267 +#define NUM_ENQUEUES 10
19268 +#define NUM_PARTIAL 4
19269 +#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
19270 + QM_SDQCR_TYPE_PRIO_QOS | \
19271 + QM_SDQCR_TOKEN_SET(0x98) | \
19272 + QM_SDQCR_CHANNELS_DEDICATED | \
19273 + QM_SDQCR_CHANNELS_POOL(POOL_ID))
19274 +#define PORTAL_OPAQUE ((void *)0xf00dbeef)
19275 +#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
19276 +
19277 +/*************************************/
19278 +/* Predeclarations (eg. for fq_base) */
19279 +/*************************************/
19280 +
19281 +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
19282 + struct qman_fq *,
19283 + const struct qm_dqrr_entry *);
19284 +static void cb_ern(struct qman_portal *, struct qman_fq *,
19285 + const struct qm_mr_entry *);
19286 +static void cb_fqs(struct qman_portal *, struct qman_fq *,
19287 + const struct qm_mr_entry *);
19288 +
19289 +/***************/
19290 +/* global vars */
19291 +/***************/
19292 +
19293 +static struct qm_fd fd, fd_dq;
19294 +static struct qman_fq fq_base = {
19295 + .cb.dqrr = cb_dqrr,
19296 + .cb.ern = cb_ern,
19297 + .cb.fqs = cb_fqs
19298 +};
19299 +static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
19300 +static int retire_complete, sdqcr_complete;
19301 +
19302 +/**********************/
19303 +/* internal functions */
19304 +/**********************/
19305 +
19306 +/* Helpers for initialising and "incrementing" a frame descriptor */
19307 +static void fd_init(struct qm_fd *__fd)
19308 +{
19309 + qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
19310 + __fd->format = qm_fd_contig_big;
19311 + __fd->length29 = 0x0000ffff;
19312 + __fd->cmd = 0xfeedf00d;
19313 +}
19314 +
19315 +static void fd_inc(struct qm_fd *__fd)
19316 +{
19317 + u64 t = qm_fd_addr_get64(__fd);
19318 + int z = t >> 40;
19319 + t <<= 1;
19320 + if (z)
19321 + t |= 1;
19322 + qm_fd_addr_set64(__fd, t);
19323 + __fd->length29--;
19324 + __fd->cmd++;
19325 +}
19326 +
19327 +/* The only part of the 'fd' we can't memcmp() is the ppid */
19328 +static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
19329 +{
19330 + int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
19331 + if (!r)
19332 + r = a->format - b->format;
19333 + if (!r)
19334 + r = a->opaque - b->opaque;
19335 + if (!r)
19336 + r = a->cmd - b->cmd;
19337 + return r;
19338 +}
19339 +
19340 +/********/
19341 +/* test */
19342 +/********/
19343 +
19344 +static void do_enqueues(struct qman_fq *fq)
19345 +{
19346 + unsigned int loop;
19347 + for (loop = 0; loop < NUM_ENQUEUES; loop++) {
19348 + if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
19349 + (((loop + 1) == NUM_ENQUEUES) ?
19350 + QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
19351 + panic("qman_enqueue() failed\n");
19352 + fd_inc(&fd);
19353 + }
19354 +}
19355 +
19356 +void qman_test_high(void)
19357 +{
19358 + unsigned int flags;
19359 + int res;
19360 + struct qman_fq *fq = &fq_base;
19361 +
19362 + pr_info("qman_test_high starting\n");
19363 + fd_init(&fd);
19364 + fd_init(&fd_dq);
19365 +
19366 + /* Initialise (parked) FQ */
19367 + if (qman_create_fq(0, FQ_FLAGS, fq))
19368 + panic("qman_create_fq() failed\n");
19369 + if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
19370 + panic("qman_init_fq() failed\n");
19371 +
19372 + /* Do enqueues + VDQCR, twice. (Parked FQ) */
19373 + do_enqueues(fq);
19374 + pr_info("VDQCR (till-empty);\n");
19375 + if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
19376 + QM_VDQCR_NUMFRAMES_TILLEMPTY))
19377 + panic("qman_volatile_dequeue() failed\n");
19378 + do_enqueues(fq);
19379 + pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
19380 + if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
19381 + QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
19382 + panic("qman_volatile_dequeue() failed\n");
19383 + pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
19384 + NUM_ENQUEUES);
19385 + if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
19386 + QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
19387 + panic("qman_volatile_dequeue() failed\n");
19388 +
19389 + do_enqueues(fq);
19390 + pr_info("scheduled dequeue (till-empty)\n");
19391 + if (qman_schedule_fq(fq))
19392 + panic("qman_schedule_fq() failed\n");
19393 + wait_event(waitqueue, sdqcr_complete);
19394 +
19395 + /* Retire and OOS the FQ */
19396 + res = qman_retire_fq(fq, &flags);
19397 + if (res < 0)
19398 + panic("qman_retire_fq() failed\n");
19399 + wait_event(waitqueue, retire_complete);
19400 + if (flags & QMAN_FQ_STATE_BLOCKOOS)
19401 + panic("leaking frames\n");
19402 + if (qman_oos_fq(fq))
19403 + panic("qman_oos_fq() failed\n");
19404 + qman_destroy_fq(fq, 0);
19405 + pr_info("qman_test_high finished\n");
19406 +}
19407 +
19408 +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
19409 + struct qman_fq *fq,
19410 + const struct qm_dqrr_entry *dq)
19411 +{
19412 + if (fd_cmp(&fd_dq, &dq->fd)) {
19413 + pr_err("BADNESS: dequeued frame doesn't match;\n");
19414 + pr_err("Expected 0x%llx, got 0x%llx\n",
19415 + (unsigned long long)fd_dq.length29,
19416 + (unsigned long long)dq->fd.length29);
19417 + BUG();
19418 + }
19419 + fd_inc(&fd_dq);
19420 + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
19421 + sdqcr_complete = 1;
19422 + wake_up(&waitqueue);
19423 + }
19424 + return qman_cb_dqrr_consume;
19425 +}
19426 +
19427 +static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
19428 + const struct qm_mr_entry *msg)
19429 +{
19430 + panic("cb_ern() unimplemented");
19431 +}
19432 +
19433 +static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
19434 + const struct qm_mr_entry *msg)
19435 +{
19436 + u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
19437 + if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
19438 + panic("unexpected FQS message");
19439 + pr_info("Retirement message received\n");
19440 + retire_complete = 1;
19441 + wake_up(&waitqueue);
19442 +}
19443 --- /dev/null
19444 +++ b/drivers/staging/fsl_qbman/qman_test_hotpotato.c
19445 @@ -0,0 +1,502 @@
19446 +/* Copyright 2009-2012 Freescale Semiconductor, Inc.
19447 + *
19448 + * Redistribution and use in source and binary forms, with or without
19449 + * modification, are permitted provided that the following conditions are met:
19450 + * * Redistributions of source code must retain the above copyright
19451 + * notice, this list of conditions and the following disclaimer.
19452 + * * Redistributions in binary form must reproduce the above copyright
19453 + * notice, this list of conditions and the following disclaimer in the
19454 + * documentation and/or other materials provided with the distribution.
19455 + * * Neither the name of Freescale Semiconductor nor the
19456 + * names of its contributors may be used to endorse or promote products
19457 + * derived from this software without specific prior written permission.
19458 + *
19459 + *
19460 + * ALTERNATIVELY, this software may be distributed under the terms of the
19461 + * GNU General Public License ("GPL") as published by the Free Software
19462 + * Foundation, either version 2 of that License or (at your option) any
19463 + * later version.
19464 + *
19465 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
19466 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19467 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19468 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
19469 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19470 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19471 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
19472 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19473 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
19474 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19475 + */
19476 +
19477 +#include <linux/kthread.h>
19478 +#include <linux/platform_device.h>
19479 +#include <linux/dma-mapping.h>
19480 +#include "qman_test.h"
19481 +
19482 +/* Algorithm:
19483 + *
19484 + * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
19485 + * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
19486 + * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
19487 + * shuttle a "hot potato" frame around them such that every forwarding action
19488 + * moves it from one cpu to another. (The use of more than one handler per cpu
19489 + * is to allow enough handlers/FQs to truly test the significance of caching -
19490 + * ie. when cache-expiries are occurring.)
19491 + *
19492 + * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
19493 + * first and last words of the frame data will undergo a transformation step on
19494 + * each forwarding action. To achieve this, each handler will be assigned a
19495 + * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
19496 + * received by a handler, the mixer of the expected sender is XOR'd into all
19497 + * words of the entire frame, which is then validated against the original
19498 + * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
19499 + * the current handler. Apart from validating that the frame is taking the
19500 + * expected path, this also provides some quasi-realistic overheads to each
19501 + * forwarding action - dereferencing *all* the frame data, computation, and
19502 + * conditional branching. There is a "special" handler designated to act as the
19503 + * instigator of the test by creating and enqueuing the "hot potato" frame, and
19504 + * to determine when the test has completed by counting HP_LOOPS iterations.
19505 + *
19506 + * Init phases:
19507 + *
19508 + * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
19509 + * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
19510 + * handlers and link-list them (but do no other handler setup).
19511 + *
19512 + * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
19513 + * hp_cpu's 'iterator' to point to its first handler. With each loop,
19514 + * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
19515 + * and advance the iterator for the next loop. This includes a final fixup,
19516 + * which connects the last handler to the first (and which is why phase 2
19517 + * and 3 are separate).
19518 + *
19519 + * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
19520 + * hp_cpu's 'iterator' to point to its first handler. With each loop,
19521 + * initialise FQ objects and advance the iterator for the next loop.
19522 + * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
19523 + * initialisation targets the correct cpu.
19524 + */
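/* Illustrative sketch only (not part of the patch itself): the validation in
 * the algorithm above works because each handler's rx_mixer equals the
 * previous handler's tx_mixer, so a second XOR with the same value restores
 * the original LFSR word before the frame is re-mixed for the next hop. This
 * mirrors what process_frame_data() below does; the helper name is made up. */
static inline int __maybe_unused hp_hop_sketch(u32 *words, int n,
					       u32 rx_mixer, u32 tx_mixer)
{
	u32 lfsr = 0xabbaf00d;	/* HP_FIRST_WORD */
	int i;

	for (i = 0; i < n; i++) {
		words[i] ^= rx_mixer;	/* undo the sender's mix */
		if (words[i] != lfsr)	/* must match the reference sequence */
			return -1;
		words[i] ^= tx_mixer;	/* mix for the next handler */
		lfsr = (lfsr >> 1) ^ (-(lfsr & 1u) & 0xd0000001u);
	}
	return 0;
}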
19525 +
19526 +/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
19527 + * the fn from irq context, which is too restrictive). */
19528 +struct bstrap {
19529 + void (*fn)(void);
19530 + atomic_t started;
19531 +};
19532 +static int bstrap_fn(void *__bstrap)
19533 +{
19534 + struct bstrap *bstrap = __bstrap;
19535 + atomic_inc(&bstrap->started);
19536 + bstrap->fn();
19537 + while (!kthread_should_stop())
19538 + msleep(1);
19539 + return 0;
19540 +}
19541 +static int on_all_cpus(void (*fn)(void))
19542 +{
19543 + int cpu;
19544 + for_each_cpu(cpu, cpu_online_mask) {
19545 + struct bstrap bstrap = {
19546 + .fn = fn,
19547 + .started = ATOMIC_INIT(0)
19548 + };
19549 + struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
19550 + "hotpotato%d", cpu);
19551 + int ret;
19552 + if (IS_ERR(k))
19553 + return -ENOMEM;
19554 + kthread_bind(k, cpu);
19555 + wake_up_process(k);
19556 + /* If we call kthread_stop() before the "wake up" has had an
19557 + * effect, then the thread may exit with -EINTR without ever
19558 + * running the function. So poll until it's started before
19559 + * requesting it to stop. */
19560 + while (!atomic_read(&bstrap.started))
19561 + msleep(10);
19562 + ret = kthread_stop(k);
19563 + if (ret)
19564 + return ret;
19565 + }
19566 + return 0;
19567 +}
19568 +
19569 +struct hp_handler {
19570 +
19571 + /* The following data is stashed when 'rx' is dequeued; */
19572 + /* -------------- */
19573 + /* The Rx FQ, dequeues of which will stash the entire hp_handler */
19574 + struct qman_fq rx;
19575 + /* The Tx FQ we should forward to */
19576 + struct qman_fq tx;
19577 + /* The value we XOR post-dequeue, prior to validating */
19578 + u32 rx_mixer;
19579 + /* The value we XOR pre-enqueue, after validating */
19580 + u32 tx_mixer;
19581 + /* what the hotpotato address should be on dequeue */
19582 + dma_addr_t addr;
19583 + u32 *frame_ptr;
19584 +
19585 + /* The following data isn't (necessarily) stashed on dequeue; */
19586 + /* -------------- */
19587 + u32 fqid_rx, fqid_tx;
19588 + /* list node for linking us into 'hp_cpu' */
19589 + struct list_head node;
19590 + /* Just to check ... */
19591 + unsigned int processor_id;
19592 +} ____cacheline_aligned;
19593 +
19594 +struct hp_cpu {
19595 + /* identify the cpu we run on; */
19596 + unsigned int processor_id;
19597 + /* root node for the per-cpu list of handlers */
19598 + struct list_head handlers;
19599 + /* list node for linking us into 'hp_cpu_list' */
19600 + struct list_head node;
19601 +	/* when repeatedly scanning 'hp_cpu_list', each time linking the n'th
19602 + * handlers together, this is used as per-cpu iterator state */
19603 + struct hp_handler *iterator;
19604 +};
19605 +
19606 +/* Each cpu has one of these */
19607 +static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
19608 +
19609 +/* links together the hp_cpu structs, in first-come first-serve order. */
19610 +static LIST_HEAD(hp_cpu_list);
19611 +static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
19612 +
19613 +static unsigned int hp_cpu_list_length;
19614 +
19615 +/* the "special" handler, that starts and terminates the test. */
19616 +static struct hp_handler *special_handler;
19617 +static int loop_counter;
19618 +
19619 +/* handlers are allocated out of this, so they're properly aligned. */
19620 +static struct kmem_cache *hp_handler_slab;
19621 +
19622 +/* this is the frame data */
19623 +static void *__frame_ptr;
19624 +static u32 *frame_ptr;
19625 +static dma_addr_t frame_dma;
19626 +
19627 +/* the main function waits on this */
19628 +static DECLARE_WAIT_QUEUE_HEAD(queue);
19629 +
19630 +#define HP_PER_CPU 2
19631 +#define HP_LOOPS 8
19632 +/* 80 words (320 bytes) of frame data, spanning several cachelines */
19633 +#define HP_NUM_WORDS 80
19634 +/* First word of the LFSR-based frame data */
19635 +#define HP_FIRST_WORD 0xabbaf00d
19636 +
19637 +static inline u32 do_lfsr(u32 prev)
19638 +{
19639 + return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
19640 +}
19641 +
19642 +static void allocate_frame_data(void)
19643 +{
19644 + u32 lfsr = HP_FIRST_WORD;
19645 + int loop;
19646 + struct platform_device *pdev = platform_device_alloc("foobar", -1);
19647 + if (!pdev)
19648 + panic("platform_device_alloc() failed");
19649 + if (platform_device_add(pdev))
19650 + panic("platform_device_add() failed");
19651 + __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
19652 + if (!__frame_ptr)
19653 + panic("kmalloc() failed");
19654 + frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
19655 + ~(unsigned long)63);
19656 + for (loop = 0; loop < HP_NUM_WORDS; loop++) {
19657 + frame_ptr[loop] = lfsr;
19658 + lfsr = do_lfsr(lfsr);
19659 + }
19660 + frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
19661 + DMA_BIDIRECTIONAL);
19662 + platform_device_del(pdev);
19663 + platform_device_put(pdev);
19664 +}
19665 +
19666 +static void deallocate_frame_data(void)
19667 +{
19668 + kfree(__frame_ptr);
19669 +}
19670 +
19671 +static inline void process_frame_data(struct hp_handler *handler,
19672 + const struct qm_fd *fd)
19673 +{
19674 + u32 *p = handler->frame_ptr;
19675 + u32 lfsr = HP_FIRST_WORD;
19676 + int loop;
19677 + if (qm_fd_addr_get64(fd) != (handler->addr & 0xffffffffff)) {
19678 + pr_err("Got 0x%llx expected 0x%llx\n",
19679 + qm_fd_addr_get64(fd), handler->addr);
19680 + panic("bad frame address");
19681 + }
19682 + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
19683 + *p ^= handler->rx_mixer;
19684 + if (*p != lfsr)
19685 + panic("corrupt frame data");
19686 + *p ^= handler->tx_mixer;
19687 + lfsr = do_lfsr(lfsr);
19688 + }
19689 +}
19690 +
19691 +static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
19692 + struct qman_fq *fq,
19693 + const struct qm_dqrr_entry *dqrr)
19694 +{
19695 + struct hp_handler *handler = (struct hp_handler *)fq;
19696 +
19697 + process_frame_data(handler, &dqrr->fd);
19698 + if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
19699 + panic("qman_enqueue() failed");
19700 + return qman_cb_dqrr_consume;
19701 +}
19702 +
19703 +static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
19704 + struct qman_fq *fq,
19705 + const struct qm_dqrr_entry *dqrr)
19706 +{
19707 + struct hp_handler *handler = (struct hp_handler *)fq;
19708 +
19709 + process_frame_data(handler, &dqrr->fd);
19710 + if (++loop_counter < HP_LOOPS) {
19711 + if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
19712 + panic("qman_enqueue() failed");
19713 + } else {
19714 + pr_info("Received final (%dth) frame\n", loop_counter);
19715 + wake_up(&queue);
19716 + }
19717 + return qman_cb_dqrr_consume;
19718 +}
19719 +
19720 +static void create_per_cpu_handlers(void)
19721 +{
19722 + struct hp_handler *handler;
19723 + int loop;
19724 + struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
19725 +
19726 + hp_cpu->processor_id = smp_processor_id();
19727 + spin_lock(&hp_lock);
19728 + list_add_tail(&hp_cpu->node, &hp_cpu_list);
19729 + hp_cpu_list_length++;
19730 + spin_unlock(&hp_lock);
19731 + INIT_LIST_HEAD(&hp_cpu->handlers);
19732 + for (loop = 0; loop < HP_PER_CPU; loop++) {
19733 + handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
19734 + if (!handler)
19735 + panic("kmem_cache_alloc() failed");
19736 + handler->processor_id = hp_cpu->processor_id;
19737 + handler->addr = frame_dma;
19738 + handler->frame_ptr = frame_ptr;
19739 + list_add_tail(&handler->node, &hp_cpu->handlers);
19740 + }
19741 + put_cpu_var(hp_cpus);
19742 +}
19743 +
19744 +static void destroy_per_cpu_handlers(void)
19745 +{
19746 + struct list_head *loop, *tmp;
19747 + struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
19748 +
19749 + spin_lock(&hp_lock);
19750 + list_del(&hp_cpu->node);
19751 + spin_unlock(&hp_lock);
19752 + list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
19753 + u32 flags;
19754 + struct hp_handler *handler = list_entry(loop, struct hp_handler,
19755 + node);
19756 + if (qman_retire_fq(&handler->rx, &flags))
19757 + panic("qman_retire_fq(rx) failed");
19758 + BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
19759 + if (qman_oos_fq(&handler->rx))
19760 + panic("qman_oos_fq(rx) failed");
19761 + qman_destroy_fq(&handler->rx, 0);
19762 + qman_destroy_fq(&handler->tx, 0);
19763 + qman_release_fqid(handler->fqid_rx);
19764 + list_del(&handler->node);
19765 + kmem_cache_free(hp_handler_slab, handler);
19766 + }
19767 + put_cpu_var(hp_cpus);
19768 +}
19769 +
19770 +static inline u8 num_cachelines(u32 offset)
19771 +{
19772 + u8 res = (offset + (L1_CACHE_BYTES - 1))
19773 + / (L1_CACHE_BYTES);
19774 + if (res > 3)
19775 + return 3;
19776 + return res;
19777 +}
19778 +#define STASH_DATA_CL \
19779 + num_cachelines(HP_NUM_WORDS * 4)
19780 +#define STASH_CTX_CL \
19781 + num_cachelines(offsetof(struct hp_handler, fqid_rx))
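/* Worked example of the stashing arithmetic, assuming the usual 64-byte
 * L1_CACHE_BYTES on these parts: the frame data is HP_NUM_WORDS * 4 = 320
 * bytes, so (320 + 63) / 64 = 5 cachelines, which num_cachelines() clamps to
 * 3. STASH_CTX_CL covers the hp_handler fields up to (but excluding) fqid_rx;
 * its exact value depends on sizeof(struct qman_fq) and is likewise capped
 * at 3. */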
19782 +
19783 +static void init_handler(void *__handler)
19784 +{
19785 + struct qm_mcc_initfq opts;
19786 + struct hp_handler *handler = __handler;
19787 + BUG_ON(handler->processor_id != smp_processor_id());
19788 + /* Set up rx */
19789 + memset(&handler->rx, 0, sizeof(handler->rx));
19790 + if (handler == special_handler)
19791 + handler->rx.cb.dqrr = special_dqrr;
19792 + else
19793 + handler->rx.cb.dqrr = normal_dqrr;
19794 + if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
19795 + panic("qman_create_fq(rx) failed");
19796 + memset(&opts, 0, sizeof(opts));
19797 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
19798 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
19799 + opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
19800 + opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
19801 + if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
19802 + QMAN_INITFQ_FLAG_LOCAL, &opts))
19803 + panic("qman_init_fq(rx) failed");
19804 + /* Set up tx */
19805 + memset(&handler->tx, 0, sizeof(handler->tx));
19806 + if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
19807 + &handler->tx))
19808 + panic("qman_create_fq(tx) failed");
19809 +}
19810 +
19811 +static void init_phase2(void)
19812 +{
19813 + int loop;
19814 + u32 fqid = 0;
19815 + u32 lfsr = 0xdeadbeef;
19816 + struct hp_cpu *hp_cpu;
19817 + struct hp_handler *handler;
19818 +
19819 + for (loop = 0; loop < HP_PER_CPU; loop++) {
19820 + list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
19821 + int ret;
19822 + if (!loop)
19823 + hp_cpu->iterator = list_first_entry(
19824 + &hp_cpu->handlers,
19825 + struct hp_handler, node);
19826 + else
19827 + hp_cpu->iterator = list_entry(
19828 + hp_cpu->iterator->node.next,
19829 + struct hp_handler, node);
19830 + /* Rx FQID is the previous handler's Tx FQID */
19831 + hp_cpu->iterator->fqid_rx = fqid;
19832 + /* Allocate new FQID for Tx */
19833 + ret = qman_alloc_fqid(&fqid);
19834 + if (ret)
19835 + panic("qman_alloc_fqid() failed");
19836 + hp_cpu->iterator->fqid_tx = fqid;
19837 + /* Rx mixer is the previous handler's Tx mixer */
19838 + hp_cpu->iterator->rx_mixer = lfsr;
19839 + /* Get new mixer for Tx */
19840 + lfsr = do_lfsr(lfsr);
19841 + hp_cpu->iterator->tx_mixer = lfsr;
19842 + }
19843 + }
19844 + /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
19845 + hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
19846 + handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
19847 + BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
19848 + handler->fqid_rx = fqid;
19849 + handler->rx_mixer = lfsr;
19850 + /* and tag it as our "special" handler */
19851 + special_handler = handler;
19852 +}
19853 +
19854 +static void init_phase3(void)
19855 +{
19856 + int loop;
19857 + struct hp_cpu *hp_cpu;
19858 +
19859 + for (loop = 0; loop < HP_PER_CPU; loop++) {
19860 + list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
19861 + if (!loop)
19862 + hp_cpu->iterator = list_first_entry(
19863 + &hp_cpu->handlers,
19864 + struct hp_handler, node);
19865 + else
19866 + hp_cpu->iterator = list_entry(
19867 + hp_cpu->iterator->node.next,
19868 + struct hp_handler, node);
19869 + preempt_disable();
19870 + if (hp_cpu->processor_id == smp_processor_id())
19871 + init_handler(hp_cpu->iterator);
19872 + else
19873 + smp_call_function_single(hp_cpu->processor_id,
19874 + init_handler, hp_cpu->iterator, 1);
19875 + preempt_enable();
19876 + }
19877 + }
19878 +}
19879 +
19880 +static void send_first_frame(void *ignore)
19881 +{
19882 + u32 *p = special_handler->frame_ptr;
19883 + u32 lfsr = HP_FIRST_WORD;
19884 + int loop;
19885 + struct qm_fd fd;
19886 +
19887 + BUG_ON(special_handler->processor_id != smp_processor_id());
19888 + memset(&fd, 0, sizeof(fd));
19889 + qm_fd_addr_set64(&fd, special_handler->addr);
19890 + fd.format = qm_fd_contig_big;
19891 + fd.length29 = HP_NUM_WORDS * 4;
19892 + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
19893 + if (*p != lfsr)
19894 + panic("corrupt frame data");
19895 + *p ^= special_handler->tx_mixer;
19896 + lfsr = do_lfsr(lfsr);
19897 + }
19898 + pr_info("Sending first frame\n");
19899 + if (qman_enqueue(&special_handler->tx, &fd, 0))
19900 + panic("qman_enqueue() failed");
19901 +}
19902 +
19903 +void qman_test_hotpotato(void)
19904 +{
19905 + if (cpumask_weight(cpu_online_mask) < 2) {
19906 + pr_info("qman_test_hotpotato, skip - only 1 CPU\n");
19907 + return;
19908 + }
19909 +
19910 + pr_info("qman_test_hotpotato starting\n");
19911 +
19912 + hp_cpu_list_length = 0;
19913 + loop_counter = 0;
19914 + hp_handler_slab = kmem_cache_create("hp_handler_slab",
19915 + sizeof(struct hp_handler), L1_CACHE_BYTES,
19916 + SLAB_HWCACHE_ALIGN, NULL);
19917 + if (!hp_handler_slab)
19918 + panic("kmem_cache_create() failed");
19919 +
19920 + allocate_frame_data();
19921 +
19922 + /* Init phase 1 */
19923 + pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
19924 + if (on_all_cpus(create_per_cpu_handlers))
19925 +		panic("on_all_cpus() failed");
19926 + pr_info("Number of cpus: %d, total of %d handlers\n",
19927 + hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
19928 +
19929 + init_phase2();
19930 +
19931 + init_phase3();
19932 +
19933 + preempt_disable();
19934 + if (special_handler->processor_id == smp_processor_id())
19935 + send_first_frame(NULL);
19936 + else
19937 + smp_call_function_single(special_handler->processor_id,
19938 + send_first_frame, NULL, 1);
19939 + preempt_enable();
19940 +
19941 + wait_event(queue, loop_counter == HP_LOOPS);
19942 + deallocate_frame_data();
19943 + if (on_all_cpus(destroy_per_cpu_handlers))
19944 +		panic("on_all_cpus() failed");
19945 + kmem_cache_destroy(hp_handler_slab);
19946 + pr_info("qman_test_hotpotato finished\n");
19947 +}
19948 --- /dev/null
19949 +++ b/drivers/staging/fsl_qbman/qman_utility.c
19950 @@ -0,0 +1,129 @@
19951 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
19952 + *
19953 + * Redistribution and use in source and binary forms, with or without
19954 + * modification, are permitted provided that the following conditions are met:
19955 + * * Redistributions of source code must retain the above copyright
19956 + * notice, this list of conditions and the following disclaimer.
19957 + * * Redistributions in binary form must reproduce the above copyright
19958 + * notice, this list of conditions and the following disclaimer in the
19959 + * documentation and/or other materials provided with the distribution.
19960 + * * Neither the name of Freescale Semiconductor nor the
19961 + * names of its contributors may be used to endorse or promote products
19962 + * derived from this software without specific prior written permission.
19963 + *
19964 + *
19965 + * ALTERNATIVELY, this software may be distributed under the terms of the
19966 + * GNU General Public License ("GPL") as published by the Free Software
19967 + * Foundation, either version 2 of that License or (at your option) any
19968 + * later version.
19969 + *
19970 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
19971 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19972 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19973 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
19974 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19975 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19976 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
19977 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19978 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
19979 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19980 + */
19981 +
19982 +#include "qman_private.h"
19983 +
19984 +/* ----------------- */
19985 +/* --- FQID Pool --- */
19986 +
19987 +struct qman_fqid_pool {
19988 + /* Base and size of the FQID range */
19989 + u32 fqid_base;
19990 + u32 total;
19991 + /* Number of FQIDs currently "allocated" */
19992 + u32 used;
19993 + /* Allocation optimisation. When 'used<total', it is the index of an
19994 + * available FQID. Otherwise there are no available FQIDs, and this
19995 + * will be set when the next deallocation occurs. */
19996 + u32 next;
19997 + /* A bit-field representation of the FQID range. */
19998 + unsigned long *bits;
19999 +};
20000 +
20001 +#define QLONG_BYTES sizeof(unsigned long)
20002 +#define QLONG_BITS (QLONG_BYTES * 8)
20003 +/* Number of 'longs' required for the given number of bits */
20004 +#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS)
20005 +/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
20006 +#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES)
20007 +/* And in bits */
20008 +#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS)
20009 +
20010 +struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
20011 +{
20012 + struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
20013 + unsigned int i;
20014 +
20015 + BUG_ON(!num);
20016 + if (!pool)
20017 + return NULL;
20018 + pool->fqid_base = fqid_start;
20019 + pool->total = num;
20020 + pool->used = 0;
20021 + pool->next = 0;
20022 + pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
20023 + if (!pool->bits) {
20024 + kfree(pool);
20025 + return NULL;
20026 + }
20027 + /* If num is not an even multiple of QLONG_BITS (or even 8, for
20028 + * byte-oriented searching) then we fill the trailing bits with 1, to
20029 + * make them look allocated (permanently). */
20030 + for (i = num + 1; i < QNUM_BITS(num); i++)
20031 + set_bit(i, pool->bits);
20032 + return pool;
20033 +}
20034 +EXPORT_SYMBOL(qman_fqid_pool_create);
20035 +
20036 +int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
20037 +{
20038 + int ret = pool->used;
20039 + kfree(pool->bits);
20040 + kfree(pool);
20041 + return ret;
20042 +}
20043 +EXPORT_SYMBOL(qman_fqid_pool_destroy);
20044 +
20045 +int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
20046 +{
20047 + int ret;
20048 + if (pool->used == pool->total)
20049 + return -ENOMEM;
20050 + *fqid = pool->fqid_base + pool->next;
20051 + ret = test_and_set_bit(pool->next, pool->bits);
20052 + BUG_ON(ret);
20053 + if (++pool->used == pool->total)
20054 + return 0;
20055 + pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
20056 + if (pool->next >= pool->total)
20057 + pool->next = find_first_zero_bit(pool->bits, pool->total);
20058 + BUG_ON(pool->next >= pool->total);
20059 + return 0;
20060 +}
20061 +EXPORT_SYMBOL(qman_fqid_pool_alloc);
20062 +
20063 +void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
20064 +{
20065 + int ret;
20066 +
20067 + fqid -= pool->fqid_base;
20068 + ret = test_and_clear_bit(fqid, pool->bits);
20069 + BUG_ON(!ret);
20070 + if (pool->used-- == pool->total)
20071 + pool->next = fqid;
20072 +}
20073 +EXPORT_SYMBOL(qman_fqid_pool_free);
20074 +
20075 +u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
20076 +{
20077 + return pool->used;
20078 +}
20079 +EXPORT_SYMBOL(qman_fqid_pool_used);
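/* Illustrative sketch only (not built as part of this file): typical use of
 * the FQID pool API above. The prototypes are assumed to be visible via
 * qman_private.h; the range base and size are arbitrary example values. */
static int __maybe_unused fqid_pool_example(void)
{
	u32 fqid;
	struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 32);

	if (!pool)
		return -ENOMEM;
	if (!qman_fqid_pool_alloc(pool, &fqid)) {
		/* fqid is now somewhere in [0x100, 0x11f] */
		qman_fqid_pool_free(pool, fqid);
	}
	/* destroy returns how many FQIDs were still allocated (0 here) */
	return qman_fqid_pool_destroy(pool);
}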
20080 --- /dev/null
20081 +++ b/include/linux/fsl_bman.h
20082 @@ -0,0 +1,532 @@
20083 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
20084 + *
20085 + * Redistribution and use in source and binary forms, with or without
20086 + * modification, are permitted provided that the following conditions are met:
20087 + * * Redistributions of source code must retain the above copyright
20088 + * notice, this list of conditions and the following disclaimer.
20089 + * * Redistributions in binary form must reproduce the above copyright
20090 + * notice, this list of conditions and the following disclaimer in the
20091 + * documentation and/or other materials provided with the distribution.
20092 + * * Neither the name of Freescale Semiconductor nor the
20093 + * names of its contributors may be used to endorse or promote products
20094 + * derived from this software without specific prior written permission.
20095 + *
20096 + *
20097 + * ALTERNATIVELY, this software may be distributed under the terms of the
20098 + * GNU General Public License ("GPL") as published by the Free Software
20099 + * Foundation, either version 2 of that License or (at your option) any
20100 + * later version.
20101 + *
20102 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20103 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20104 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20105 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
20106 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20107 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20108 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
20109 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
20110 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
20111 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20112 + */
20113 +
20114 +#ifndef FSL_BMAN_H
20115 +#define FSL_BMAN_H
20116 +
20117 +#ifdef __cplusplus
20118 +extern "C" {
20119 +#endif
20120 +
20121 +/* Last updated for v00.79 of the BG */
20122 +
20123 +/* Portal processing (interrupt) sources */
20124 +#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
20125 +#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */
20126 +
20127 +/* This wrapper represents a bit-array for the depletion state of the 64 Bman
20128 + * buffer pools. */
20129 +struct bman_depletion {
20130 + u32 __state[2];
20131 +};
20132 +#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
20133 +#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
20134 +#define __bmdep_word(x) ((x) >> 5)
20135 +#define __bmdep_shift(x) ((x) & 0x1f)
20136 +#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
20137 +static inline void bman_depletion_init(struct bman_depletion *c)
20138 +{
20139 + c->__state[0] = c->__state[1] = 0;
20140 +}
20141 +static inline void bman_depletion_fill(struct bman_depletion *c)
20142 +{
20143 + c->__state[0] = c->__state[1] = ~0;
20144 +}
20145 +static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
20146 +{
20147 + return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
20148 +}
20149 +static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
20150 +{
20151 + c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
20152 +}
20153 +static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
20154 +{
20155 + c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
20156 +}
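/* Illustrative sketch: building a depletion mask that selects a single buffer
 * pool, e.g. for the 'mask' field of a portal configuration. The BPID value
 * is an arbitrary example. */
static inline void bman_depletion_mask_example(struct bman_depletion *mask)
{
	bman_depletion_init(mask);	/* start with no pools selected */
	bman_depletion_set(mask, 12);	/* select BPID 12 */
	/* bman_depletion_get(mask, 12) is now non-zero */
}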
20157 +
20158 +/* ------------------------------------------------------- */
20159 +/* --- Bman data structures (and associated constants) --- */
20160 +
20161 +/* Represents s/w corenet portal mapped data structures */
20162 +struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
20163 +struct bm_mc_command; /* MC (Management Command) command */
20164 +struct bm_mc_result; /* MC result */
20165 +
20166 +/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
20167 + * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
20168 + * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
20169 +struct bm_buffer {
20170 + union {
20171 + struct {
20172 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20173 + u8 __reserved1;
20174 + u8 bpid;
20175 + u16 hi; /* High 16-bits of 48-bit address */
20176 + u32 lo; /* Low 32-bits of 48-bit address */
20177 +#else
20178 + u32 lo;
20179 + u16 hi;
20180 + u8 bpid;
20181 + u8 __reserved;
20182 +#endif
20183 + };
20184 + struct {
20185 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20186 + u64 __notaddress:16;
20187 + u64 addr:48;
20188 +#else
20189 + u64 addr:48;
20190 + u64 __notaddress:16;
20191 +#endif
20192 + };
20193 + u64 opaque;
20194 + };
20195 +} __aligned(8);
20196 +static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
20197 +{
20198 + return buf->addr;
20199 +}
20200 +static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
20201 +{
20202 + return (dma_addr_t)buf->addr;
20203 +}
20204 +/* Macro, so we compile better if 'v' isn't always 64-bit */
20205 +#define bm_buffer_set64(buf, v) \
20206 + do { \
20207 + struct bm_buffer *__buf931 = (buf); \
20208 + __buf931->hi = upper_32_bits(v); \
20209 + __buf931->lo = lower_32_bits(v); \
20210 + } while (0)
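/* Illustrative sketch: populating a bm_buffer with a (48-bit) DMA address and
 * reading it back; the address is supplied by the caller. */
static inline void bm_buffer_example(struct bm_buffer *buf, dma_addr_t addr)
{
	buf->opaque = 0;		/* clear bpid and reserved fields */
	bm_buffer_set64(buf, addr);	/* stores the upper/lower halves */
	/* bm_buffer_get64(buf) now returns addr truncated to 48 bits */
}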
20211 +
20212 +/* See 1.5.3.5.4: "Release Command" */
20213 +struct bm_rcr_entry {
20214 + union {
20215 + struct {
20216 + u8 __dont_write_directly__verb;
20217 + u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
20218 + u8 __reserved1[62];
20219 + };
20220 + struct bm_buffer bufs[8];
20221 + };
20222 +} __packed;
20223 +#define BM_RCR_VERB_VBIT 0x80
20224 +#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
20225 +#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
20226 +#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
20227 +#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
20228 +
20229 +/* See 1.5.3.1: "Acquire Command" */
20230 +/* See 1.5.3.2: "Query Command" */
20231 +struct bm_mcc_acquire {
20232 + u8 bpid;
20233 + u8 __reserved1[62];
20234 +} __packed;
20235 +struct bm_mcc_query {
20236 + u8 __reserved2[63];
20237 +} __packed;
20238 +struct bm_mc_command {
20239 + u8 __dont_write_directly__verb;
20240 + union {
20241 + struct bm_mcc_acquire acquire;
20242 + struct bm_mcc_query query;
20243 + };
20244 +} __packed;
20245 +#define BM_MCC_VERB_VBIT 0x80
20246 +#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
20247 +#define BM_MCC_VERB_CMD_ACQUIRE 0x10
20248 +#define BM_MCC_VERB_CMD_QUERY 0x40
20249 +#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
20250 +
20251 +/* See 1.5.3.3: "Acquire Response" */
20252 +/* See 1.5.3.4: "Query Response" */
20253 +struct bm_pool_state {
20254 + u8 __reserved1[32];
20255 + /* "availability state" and "depletion state" */
20256 + struct {
20257 + u8 __reserved1[8];
20258 + /* Access using bman_depletion_***() */
20259 + struct bman_depletion state;
20260 + } as, ds;
20261 +};
20262 +struct bm_mc_result {
20263 + union {
20264 + struct {
20265 + u8 verb;
20266 + u8 __reserved1[63];
20267 + };
20268 + union {
20269 + struct {
20270 + u8 __reserved1;
20271 + u8 bpid;
20272 + u8 __reserved2[62];
20273 + };
20274 + struct bm_buffer bufs[8];
20275 + } acquire;
20276 + struct bm_pool_state query;
20277 + };
20278 +} __packed;
20279 +#define BM_MCR_VERB_VBIT 0x80
20280 +#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
20281 +#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
20282 +#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
20283 +#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
20284 +#define BM_MCR_VERB_CMD_ERR_ECC 0x70
20285 +#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
20286 +/* Determine the "availability state" of pool 'p' from a query result 'r' */
20287 +#define BM_MCR_QUERY_AVAILABILITY(r, p) \
20288 + bman_depletion_get(&r->query.as.state, p)
20289 +/* Determine the "depletion state" of pool 'p' from a query result 'r' */
20290 +#define BM_MCR_QUERY_DEPLETION(r, p) \
20291 + bman_depletion_get(&r->query.ds.state, p)
20292 +
20293 +/*******************************************************************/
20294 +/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
20295 +/*******************************************************************/
20296 +
20297 + /* Portal and Buffer Pools */
20298 + /* ----------------------- */
20299 +/* Represents a managed portal */
20300 +struct bman_portal;
20301 +
20302 +/* This object type represents Bman buffer pools. */
20303 +struct bman_pool;
20304 +
20305 +struct bman_portal_config {
20306 + /* This is used for any "core-affine" portals, ie. default portals
20307 +	 * associated with the corresponding cpu. -1 implies that there is no core
20308 + * affinity configured. */
20309 + int cpu;
20310 + /* portal interrupt line */
20311 + int irq;
20312 + /* the unique index of this portal */
20313 + u32 index;
20314 + /* Is this portal shared? (If so, it has coarser locking and demuxes
20315 + * processing on behalf of other CPUs.) */
20316 + int is_shared;
20317 + /* These are the buffer pool IDs that may be used via this portal. */
20318 + struct bman_depletion mask;
20319 +};
20320 +
20321 +/* This callback type is used when handling pool depletion entry/exit. The
20322 + * 'cb_ctx' value is the opaque value associated with the pool object in
20323 + * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
20324 + * depletion-exit. */
20325 +typedef void (*bman_cb_depletion)(struct bman_portal *bm,
20326 + struct bman_pool *pool, void *cb_ctx, int depleted);
20327 +
20328 +/* This struct specifies parameters for a bman_pool object. */
20329 +struct bman_pool_params {
20330 + /* index of the buffer pool to encapsulate (0-63), ignored if
20331 + * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
20332 + u32 bpid;
20333 + /* bit-mask of BMAN_POOL_FLAG_*** options */
20334 + u32 flags;
20335 + /* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
20336 + bman_cb_depletion cb;
20337 + /* opaque user value passed as a parameter to 'cb' */
20338 + void *cb_ctx;
20339 + /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
20340 + * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
20341 + * when run in the control plane (which controls Bman CCSR). This array
20342 + * matches the definition of bm_pool_set(). */
20343 + u32 thresholds[4];
20344 +};
20345 +
20346 +/* Flags to bman_new_pool() */
20347 +#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
20348 +#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
20349 +#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */
20350 +#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
20351 +#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
20352 +#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */
20353 +
20354 +/* Flags to bman_release() */
20355 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
20356 +#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */
20357 +#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */
20358 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
20359 +#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
20360 +#endif
20361 +#endif
20362 +#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
20363 +
20364 +/* Flags to bman_acquire() */
20365 +#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */
20366 +
20367 + /* Portal Management */
20368 + /* ----------------- */
20369 +/**
20370 + * bman_get_portal_config - get portal configuration settings
20371 + *
20372 + * This returns a read-only view of the current cpu's affine portal settings.
20373 + */
20374 +const struct bman_portal_config *bman_get_portal_config(void);
20375 +
20376 +/**
20377 + * bman_irqsource_get - return the portal work that is interrupt-driven
20378 + *
20379 + * Returns a bitmask of BM_PIRQ_**I processing sources that are currently
20380 + * enabled for interrupt handling on the current cpu's affine portal. These
20381 + * sources will trigger the portal interrupt and the interrupt handler (or a
20382 + * tasklet/bottom-half it defers to) will perform the corresponding processing
20383 + * work. The bman_poll_***() functions will only process sources that are not in
20384 + * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
20385 + * this always returns zero.
20386 + */
20387 +u32 bman_irqsource_get(void);
20388 +
20389 +/**
20390 + * bman_irqsource_add - add processing sources to be interrupt-driven
20391 + * @bits: bitmask of BM_PIRQ_**I processing sources
20392 + *
20393 + * Adds processing sources that should be interrupt-driven (rather than
20394 + * processed via bman_poll_***() functions). Returns zero for success, or
20395 + * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
20396 +int bman_irqsource_add(u32 bits);
20397 +
20398 +/**
20399 + * bman_irqsource_remove - remove processing sources from being interrupt-driven
20400 + * @bits: bitmask of BM_PIRQ_**I processing sources
20401 + *
20402 + * Removes processing sources from being interrupt-driven, so that they will
20403 + * instead be processed via bman_poll_***() functions. Returns zero for success,
20404 + * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
20405 +int bman_irqsource_remove(u32 bits);
20406 +
20407 +/**
20408 + * bman_affine_cpus - return a mask of cpus that have affine portals
20409 + */
20410 +const cpumask_t *bman_affine_cpus(void);
20411 +
20412 +/**
20413 + * bman_poll_slow - process anything that isn't interrupt-driven.
20414 + *
20415 + * This function does any portal processing that isn't interrupt-driven. If the
20416 + * current CPU is sharing a portal hosted on another CPU, this function will
20417 + * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
20418 + * indicating what interrupt sources were actually processed by the call.
20419 + *
20420 + * NB, unlike the legacy wrapper bman_poll(), this function will
20421 + * deterministically check for the presence of portal processing work and do it,
20422 + * which implies some latency even if there's nothing to do. The bman_poll()
20423 + * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
20424 + * checking for (and doing) portal processing infrequently. Ie. such that
20425 + * qman_poll() and bman_poll() can be called from core-processing loops. Use
20426 + * bman_poll_slow() when you yourself are deciding when to incur the overhead of
20427 + * processing.
20428 + */
20429 +u32 bman_poll_slow(void);
20430 +
20431 +/**
20432 + * bman_poll - process anything that isn't interrupt-driven.
20433 + *
20434 + * Dispatcher logic on a cpu can use this to trigger any maintenance of the
20435 + * affine portal. This function does whatever processing is not triggered by
20436 + * interrupts. This is a legacy wrapper that can be used in core-processing
20437 + * loops but mitigates the performance overhead of portal processing by
20438 + * adaptively bypassing true portal processing most of the time. (Processing is
20439 + * done once every 10 calls if the previous processing revealed that work needed
20440 + * to be done, or once very 1000 calls if the previous processing revealed no
20441 + * work needed doing.) If you wish to control this yourself, call
20442 + * bman_poll_slow() instead, which always checks for portal processing work.
20443 + */
20444 +void bman_poll(void);
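/* Illustrative sketch: a per-cpu worker bound to one of the cpus reported by
 * bman_affine_cpus() could drive non-interrupt-driven processing like this;
 * the stop flag is the caller's own. */
static inline void bman_poll_loop_example(const volatile int *stop)
{
	while (!*stop) {
		bman_poll();	/* cheap; real processing happens periodically */
		cpu_relax();
	}
}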
20445 +
20446 +/**
20447 + * bman_rcr_is_empty - Determine if portal's RCR is empty
20448 + *
20449 + * For use in situations where a cpu-affine caller needs to determine when all
20450 + * releases for the local portal have been processed by Bman but can't use the
20451 + * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
20452 + * The function forces tracking of RCR consumption (which normally doesn't
20453 + * happen until release processing needs to find space to put new release
20454 + * commands), and returns zero if the ring still has unprocessed entries,
20455 + * non-zero if it is empty.
20456 + */
20457 +int bman_rcr_is_empty(void);
20458 +
20459 +/**
20460 + * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
20461 + * @result: is set by the API to the base BPID of the allocated range
20462 + * @count: the number of BPIDs required
20463 + * @align: required alignment of the allocated range
20464 + * @partial: non-zero if the API can return fewer than @count BPIDs
20465 + *
20466 + * Returns the number of buffer pools allocated, or a negative error code. If
20467 + * @partial is non zero, the allocation request may return a smaller range of
20468 + * BPs than requested (though alignment will be as requested). If @partial is
20469 + * zero, the return value will either be 'count' or negative.
20470 + */
20471 +int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
20472 +static inline int bman_alloc_bpid(u32 *result)
20473 +{
20474 + int ret = bman_alloc_bpid_range(result, 1, 0, 0);
20475 + return (ret > 0) ? 0 : ret;
20476 +}
20477 +
20478 +/**
20479 + * bman_release_bpid_range - Release the specified range of buffer pool IDs
20480 + * @bpid: the base BPID of the range to deallocate
20481 + * @count: the number of BPIDs in the range
20482 + *
20483 + * This function can also be used to seed the allocator with ranges of BPIDs
20484 + * that it can subsequently allocate from.
20485 + */
20486 +void bman_release_bpid_range(u32 bpid, unsigned int count);
20487 +static inline void bman_release_bpid(u32 bpid)
20488 +{
20489 + bman_release_bpid_range(bpid, 1);
20490 +}
20491 +
20492 +int bman_reserve_bpid_range(u32 bpid, unsigned int count);
20493 +static inline int bman_reserve_bpid(u32 bpid)
20494 +{
20495 + return bman_reserve_bpid_range(bpid, 1);
20496 +}
20497 +
20498 +void bman_seed_bpid_range(u32 bpid, unsigned int count);
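/* Illustrative sketch: allocating a naturally-aligned block of four BPIDs and
 * handing it back when finished; the counts are example values only. */
static inline int bman_bpid_range_example(u32 *base)
{
	int ret = bman_alloc_bpid_range(base, 4, 4, 0);

	if (ret < 0)		/* no partial allocation was requested */
		return ret;
	/* ... create pools for *base .. *base + 3 here ... */
	bman_release_bpid_range(*base, 4);
	return 0;
}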
20499 +
20500 +
20501 +int bman_shutdown_pool(u32 bpid);
20502 +
20503 + /* Pool management */
20504 + /* --------------- */
20505 +/**
20506 + * bman_new_pool - Allocates a Buffer Pool object
20507 + * @params: parameters specifying the buffer pool ID and behaviour
20508 + *
20509 + * Creates a pool object for the given @params. A portal and the depletion
20510 + * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
20511 + * is set. NB, the fields from @params are copied into the new pool object, so
20512 + * the structure provided by the caller can be released or reused after the
20513 + * function returns.
20514 + */
20515 +struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
20516 +
20517 +/**
20518 + * bman_free_pool - Deallocates a Buffer Pool object
20519 + * @pool: the pool object to release
20520 + *
20521 + */
20522 +void bman_free_pool(struct bman_pool *pool);
20523 +
20524 +/**
20525 + * bman_get_params - Returns a pool object's parameters.
20526 + * @pool: the pool object
20527 + *
20528 + * The returned pointer refers to state within the pool object so must not be
20529 + * modified and can no longer be read once the pool object is destroyed.
20530 + */
20531 +const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
20532 +
20533 +/**
20534 + * bman_release - Release buffer(s) to the buffer pool
20535 + * @pool: the buffer pool object to release to
20536 + * @bufs: an array of buffers to release
20537 + * @num: the number of buffers in @bufs (1-8)
20538 + * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
20539 + *
20540 + * Adds the given buffers to RCR entries. If the affine portal was created with
20541 + * "COMPACT" flag, then it will be using a compaction algorithm to improve
20542 + * utilisation of RCR. As such, these buffers may join an existing ring entry
20543 + * and/or it may not be issued right away so as to allow future releases to join
20544 + * the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
20545 + * behaviour by committing the RCR entry (or entries) right away. If the RCR
20546 + * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
20547 + * is selected, in which case it will sleep waiting for space to become
20548 + * available in RCR. If the function receives a signal before such time (and
20549 + * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
20550 + * it returns zero.
20551 + */
20552 +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
20553 + u32 flags);
20554 +
20555 +/**
20556 + * bman_acquire - Acquire buffer(s) from a buffer pool
20557 + * @pool: the buffer pool object to acquire from
20558 + * @bufs: array for storing the acquired buffers
20559 + * @num: the number of buffers desired (@bufs is at least this big)
20560 + *
20561 + * Issues an "Acquire" command via the portal's management command interface.
20562 + * The return value will be the number of buffers obtained from the pool, or a
20563 + * negative error code if a h/w error or pool starvation was encountered. In
20564 + * the latter case, the content of @bufs is undefined.
20565 + */
20566 +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
20567 + u32 flags);
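/* Illustrative sketch: a pool with a dynamically allocated BPID, seeded with
 * one buffer that is then taken back. Error handling is abbreviated and the
 * DMA address comes from the caller. */
static inline int bman_pool_example(dma_addr_t addr)
{
	struct bm_buffer buf;
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
	};
	struct bman_pool *pool = bman_new_pool(&params);

	if (!pool)
		return -ENODEV;
	buf.opaque = 0;
	bm_buffer_set64(&buf, addr);
	if (!bman_release(pool, &buf, 1, 0))	/* give the buffer to the pool */
		bman_acquire(pool, &buf, 1, 0);	/* and take one back */
	bman_free_pool(pool);
	return 0;
}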
20568 +
20569 +/**
20570 + * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
20571 + * @pool: the buffer pool object the stockpile belongs
20572 + * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
20573 + *
20574 + * Adds stockpile buffers to RCR entries until the stockpile is empty.
20575 + * The return value will be a negative error code if a h/w error occurred.
20576 + * If BMAN_RELEASE_FLAG_NOW flag is passed and RCR ring is full,
20577 + * -EAGAIN will be returned.
20578 + */
20579 +int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
20580 +
20581 +/**
20582 + * bman_query_pools - Query all buffer pool states
20583 + * @state: storage for the queried availability and depletion states
20584 + */
20585 +int bman_query_pools(struct bm_pool_state *state);
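/* Illustrative sketch: checking whether a given pool is currently in its
 * depleted state via a full pool-state query. */
static inline int bman_pool_is_depleted_example(u8 bpid)
{
	struct bm_pool_state state;

	if (bman_query_pools(&state))
		return -EIO;
	return bman_depletion_get(&state.ds.state, bpid) ? 1 : 0;
}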
20586 +
20587 +#ifdef CONFIG_FSL_BMAN_CONFIG
20588 +/**
20589 + * bman_query_free_buffers - Query how many free buffers are in buffer pool
20590 + * @pool: the buffer pool object to query
20591 + *
20592 + * Returns the number of free buffers in the pool
20593 + */
20594 +u32 bman_query_free_buffers(struct bman_pool *pool);
20595 +
20596 +/**
20597 + * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
20598 + * @pool: the buffer pool object to which the thresholds will be set
20599 + * @thresholds: the new thresholds
20600 + */
20601 +int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
20602 +#endif
20603 +
20604 +/**
20605 + * The bman_p_***() variant below may be called in situations where the cpu
20606 + * to which the portal is affine is not yet online.
20607 + * @p: specifies which portal the API will use.
20608 + */
20609 +int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
20610 +#ifdef __cplusplus
20611 +}
20612 +#endif
20613 +
20614 +#endif /* FSL_BMAN_H */
20615 --- /dev/null
20616 +++ b/include/linux/fsl_qman.h
20617 @@ -0,0 +1,3888 @@
20618 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
20619 + *
20620 + * Redistribution and use in source and binary forms, with or without
20621 + * modification, are permitted provided that the following conditions are met:
20622 + * * Redistributions of source code must retain the above copyright
20623 + * notice, this list of conditions and the following disclaimer.
20624 + * * Redistributions in binary form must reproduce the above copyright
20625 + * notice, this list of conditions and the following disclaimer in the
20626 + * documentation and/or other materials provided with the distribution.
20627 + * * Neither the name of Freescale Semiconductor nor the
20628 + * names of its contributors may be used to endorse or promote products
20629 + * derived from this software without specific prior written permission.
20630 + *
20631 + *
20632 + * ALTERNATIVELY, this software may be distributed under the terms of the
20633 + * GNU General Public License ("GPL") as published by the Free Software
20634 + * Foundation, either version 2 of that License or (at your option) any
20635 + * later version.
20636 + *
20637 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20638 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20639 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20640 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
20641 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20642 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20643 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
20644 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
20645 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
20646 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20647 + */
20648 +
20649 +#ifndef FSL_QMAN_H
20650 +#define FSL_QMAN_H
20651 +
20652 +#ifdef __cplusplus
20653 +extern "C" {
20654 +#endif
20655 +
20656 +/* Last updated for v00.800 of the BG */
20657 +
20658 +/* Hardware constants */
20659 +#define QM_CHANNEL_SWPORTAL0 0
20660 +#define QMAN_CHANNEL_POOL1 0x21
20661 +#define QMAN_CHANNEL_CAAM 0x80
20662 +#define QMAN_CHANNEL_PME 0xa0
20663 +#define QMAN_CHANNEL_POOL1_REV3 0x401
20664 +#define QMAN_CHANNEL_CAAM_REV3 0x840
20665 +#define QMAN_CHANNEL_PME_REV3 0x860
20666 +#define QMAN_CHANNEL_DCE 0x8a0
20667 +#define QMAN_CHANNEL_DCE_QMANREV312 0x880
20668 +extern u16 qm_channel_pool1;
20669 +extern u16 qm_channel_caam;
20670 +extern u16 qm_channel_pme;
20671 +extern u16 qm_channel_dce;
20672 +enum qm_dc_portal {
20673 + qm_dc_portal_fman0 = 0,
20674 + qm_dc_portal_fman1 = 1,
20675 + qm_dc_portal_caam = 2,
20676 + qm_dc_portal_pme = 3,
20677 + qm_dc_portal_rman = 4,
20678 + qm_dc_portal_dce = 5
20679 +};
20680 +
20681 +/* Portal processing (interrupt) sources */
20682 +#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
20683 +#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
20684 +#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
20685 +#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
20686 +#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
20687 +#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
20688 +/* This mask contains all the interrupt sources that need handling except DQRI,
20689 + * ie. that if present should trigger slow-path processing. */
20690 +#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
20691 + QM_PIRQ_MRI | QM_PIRQ_CCSCI)
20692 +
20693 +/* --- Clock speed --- */
20694 +/* A qman driver instance may or may not know the current qman clock speed.
20695 + * However, certain CEETM calculations may not be possible if this is not known.
20696 + * The 'set' function will only succeed (return zero) if the driver did not
20697 + * already know the clock speed. Likewise, the 'get' function will only succeed
20698 + * if the driver does know the clock speed (either because it knew when booting,
20699 + * or was told via 'set'). In cases where software is running on a driver
20700 + * instance that does not know the clock speed (eg. on a hypervised data-plane),
20701 + * and the user can obtain the current qman clock speed by other means (eg. from
20702 + * a message sent from the control-plane), then the 'set' function can be used
20703 + * to enable rate-calculations in a driver where it would otherwise not be
20704 + * possible. */
20705 +int qm_get_clock(u64 *clock_hz);
20706 +int qm_set_clock(u64 clock_hz);
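Editorial sketch (not part of the patch) of the get/set contract described in the comment above: a data-plane instance that learns the QMan clock by other means seeds the driver once and can then read it back. The clock_from_control_plane parameter is an assumed input, not an API from this header.

/* Minimal sketch, assuming the get/set semantics documented above:
 * try to read the clock; if the driver does not know it yet, tell it. */
static int example_seed_qman_clock(u64 clock_from_control_plane)
{
	u64 hz;

	if (!qm_get_clock(&hz))
		return 0;			/* driver already knows the clock */
	if (qm_set_clock(clock_from_control_plane))
		return -1;			/* set refused (clock already known) */
	return qm_get_clock(&hz);		/* expected to succeed now */
}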
20707 +
20708 +/* For qman_static_dequeue_*** APIs */
20709 +#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
20710 +/* for n in [1,15] */
20711 +#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
20712 +/* for conversion from n of qm_channel */
20713 +static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
20714 +{
20715 + return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
20716 +}
20717 +
20718 +/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
20719 + * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
20720 + * FQID(n) to fill in the frame queue ID. */
20721 +#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
20722 +#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
20723 +#define QM_VDQCR_EXACT 0x40000000
20724 +#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
20725 +#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
20726 +#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
20727 +#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
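As a small editorial illustration of composing a volatile-dequeue word from the macros above (the FQID portion comes from macros defined elsewhere in the driver and is left out here):

/* Sketch: up to 8 frames, SDQCR precedence, exact count; FQID bits not shown. */
u32 vdqcr = QM_VDQCR_PRECEDENCE_SDQCR | QM_VDQCR_EXACT |
	    QM_VDQCR_NUMFRAMES_SET(8);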
20728 +
20729 +
20730 +/* ------------------------------------------------------- */
20731 +/* --- Qman data structures (and associated constants) --- */
20732 +
20733 +/* Represents s/w corenet portal mapped data structures */
20734 +struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
20735 +struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
20736 +struct qm_mr_entry; /* MR (Message Ring) entries */
20737 +struct qm_mc_command; /* MC (Management Command) command */
20738 +struct qm_mc_result; /* MC result */
20739 +
20740 +/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */
20741 +#define QM_FD_FORMAT_SG 0x4
20742 +#define QM_FD_FORMAT_LONG 0x2
20743 +#define QM_FD_FORMAT_COMPOUND 0x1
20744 +enum qm_fd_format {
20745 + /* 'contig' implies a contiguous buffer, whereas 'sg' implies a
20746 + * scatter-gather table. 'big' implies a 29-bit length with no offset
20747 + * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
20748 + * implies a s/g-like table, where each entry itself represents a frame
20749 + * (contiguous or scatter-gather) and the 29-bit "length" is
20750 + * interpreted purely for congestion calculations, ie. a "congestion
20751 + * weight". */
20752 + qm_fd_contig = 0,
20753 + qm_fd_contig_big = QM_FD_FORMAT_LONG,
20754 + qm_fd_sg = QM_FD_FORMAT_SG,
20755 + qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
20756 + qm_fd_compound = QM_FD_FORMAT_COMPOUND
20757 +};
20758 +
20759 +/* Capitalised versions are un-typed but can be used in static expressions */
20760 +#define QM_FD_CONTIG 0
20761 +#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
20762 +#define QM_FD_SG QM_FD_FORMAT_SG
20763 +#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
20764 +#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
20765 +
20766 +/* See 1.5.1.1: "Frame Descriptor (FD)" */
20767 +struct qm_fd {
20768 + union {
20769 + struct {
20770 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20771 + u8 dd:2; /* dynamic debug */
20772 + u8 liodn_offset:6;
20773 + u8 bpid:8; /* Buffer Pool ID */
20774 + u8 eliodn_offset:4;
20775 + u8 __reserved:4;
20776 + u8 addr_hi; /* high 8-bits of 40-bit address */
20777 + u32 addr_lo; /* low 32-bits of 40-bit address */
20778 +#else
20779 + u32 addr_lo; /* low 32-bits of 40-bit address */
20780 + u8 addr_hi; /* high 8-bits of 40-bit address */
20781 + u8 __reserved:4;
20782 + u8 eliodn_offset:4;
20783 + u8 bpid:8; /* Buffer Pool ID */
20784 + u8 liodn_offset:6;
20785 + u8 dd:2; /* dynamic debug */
20786 +#endif
20787 + };
20788 + struct {
20789 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20790 + u64 __notaddress:24;
20791 + u64 addr:40;
20792 +#else
20793 + u64 addr:40;
20794 + u64 __notaddress:24;
20795 +#endif
20796 + };
20797 + u64 opaque_addr;
20798 + };
20799 + /* The 'format' field indicates the interpretation of the remaining 29
20800 + * bits of the 32-bit word. For packing reasons, it is duplicated in the
20801 + * other union elements. Note, union'd structs are difficult to use with
20802 + * static initialisation under gcc, in which case use the "opaque" form
20803 + * with one of the macros. */
20804 + union {
20805 + /* For easier/faster copying of this part of the fd (eg. from a
20806 + * DQRR entry to an EQCR entry) copy 'opaque' */
20807 + u32 opaque;
20808 + /* If 'format' is _contig or _sg, 20b length and 9b offset */
20809 + struct {
20810 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20811 + enum qm_fd_format format:3;
20812 + u16 offset:9;
20813 + u32 length20:20;
20814 +#else
20815 + u32 length20:20;
20816 + u16 offset:9;
20817 + enum qm_fd_format format:3;
20818 +#endif
20819 + };
20820 + /* If 'format' is _contig_big or _sg_big, 29b length */
20821 + struct {
20822 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20823 + enum qm_fd_format _format1:3;
20824 + u32 length29:29;
20825 +#else
20826 + u32 length29:29;
20827 + enum qm_fd_format _format1:3;
20828 +#endif
20829 + };
20830 + /* If 'format' is _compound, 29b "congestion weight" */
20831 + struct {
20832 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20833 + enum qm_fd_format _format2:3;
20834 + u32 cong_weight:29;
20835 +#else
20836 + u32 cong_weight:29;
20837 + enum qm_fd_format _format2:3;
20838 +#endif
20839 + };
20840 + };
20841 + union {
20842 + u32 cmd;
20843 + u32 status;
20844 + };
20845 +} __aligned(8);
20846 +#define QM_FD_DD_NULL 0x00
20847 +#define QM_FD_PID_MASK 0x3f
20848 +static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
20849 +{
20850 + return fd->addr;
20851 +}
20852 +
20853 +static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
20854 +{
20855 + return (dma_addr_t)fd->addr;
20856 +}
20857 +/* Macro, so we compile better if 'v' isn't always 64-bit */
20858 +#define qm_fd_addr_set64(fd, v) \
20859 + do { \
20860 + struct qm_fd *__fd931 = (fd); \
20861 + __fd931->addr = v; \
20862 + } while (0)
20863 +
20864 +/* For static initialisation of FDs (which is complicated by the use of unions
20865 + * in "struct qm_fd"), use the following macros. Note that;
20866 + * - 'dd', 'pid' and 'bpid' are ignored (because there's no static initialisation
20867 + *   use-case),
20868 + * - use capitalised QM_FD_*** formats for static initialisation.
20869 + */
20870 +#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \
20871 + { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
20872 + { (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \
20873 + { cmd } }
20874 +#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \
20875 + { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
20876 + { (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \
20877 + { cmd } }
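An editorial sketch (not part of the patch) of how a contiguous FD is typically populated at run time with the helpers and fields defined above; buf_dma and len are assumed inputs:

/* Sketch: build a simple contiguous FD for a buffer that is already DMA-mapped.
 * bpid 0 here stands for "no buffer pool"; all values are illustrative. */
static void example_build_contig_fd(struct qm_fd *fd, dma_addr_t buf_dma, u32 len)
{
	memset(fd, 0, sizeof(*fd));
	qm_fd_addr_set64(fd, buf_dma);
	fd->format = qm_fd_contig;	/* 20-bit length + 9-bit offset form */
	fd->offset = 0;
	fd->length20 = len;
	fd->bpid = 0;
	fd->cmd = 0;
}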
20878 +
20879 +/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */
20880 +#define QM_SG_OFFSET_MASK 0x1FFF
20881 +struct qm_sg_entry {
20882 + union {
20883 + struct {
20884 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20885 + u8 __reserved1[3];
20886 + u8 addr_hi; /* high 8-bits of 40-bit address */
20887 + u32 addr_lo; /* low 32-bits of 40-bit address */
20888 +#else
20889 + u32 addr_lo; /* low 32-bits of 40-bit address */
20890 + u8 addr_hi; /* high 8-bits of 40-bit address */
20891 + u8 __reserved1[3];
20892 +#endif
20893 + };
20894 + struct {
20895 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20896 + u64 __notaddress:24;
20897 + u64 addr:40;
20898 +#else
20899 + u64 addr:40;
20900 + u64 __notaddress:24;
20901 +#endif
20902 + };
20903 + u64 opaque;
20904 + };
20905 + union {
20906 + struct {
20907 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20908 + u32 extension:1; /* Extension bit */
20909 + u32 final:1; /* Final bit */
20910 + u32 length:30;
20911 +#else
20912 + u32 length:30;
20913 + u32 final:1; /* Final bit */
20914 + u32 extension:1; /* Extension bit */
20915 +#endif
20916 + };
20917 + u32 sgt_efl;
20918 + };
20919 + u8 __reserved2;
20920 + u8 bpid;
20921 + union {
20922 + struct {
20923 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20924 + u16 __reserved3:3;
20925 + u16 offset:13;
20926 +#else
20927 + u16 offset:13;
20928 + u16 __reserved3:3;
20929 +#endif
20930 + };
20931 + u16 opaque_offset;
20932 + };
20933 +} __packed;
20934 +union qm_sg_efl {
20935 + struct {
20936 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
20937 + u32 extension:1; /* Extension bit */
20938 + u32 final:1; /* Final bit */
20939 + u32 length:30;
20940 +#else
20941 + u32 length:30;
20942 + u32 final:1; /* Final bit */
20943 + u32 extension:1; /* Extension bit */
20944 +#endif
20945 + };
20946 + u32 efl;
20947 +};
20948 +static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
20949 +{
20950 + return (dma_addr_t)be64_to_cpu(sg->opaque) & 0xffffffffffULL;
20951 +}
20952 +static inline u8 qm_sg_entry_get_ext(const struct qm_sg_entry *sg)
20953 +{
20954 + union qm_sg_efl u;
20955 +
20956 + u.efl = be32_to_cpu(sg->sgt_efl);
20957 + return u.extension;
20958 +}
20959 +static inline u8 qm_sg_entry_get_final(const struct qm_sg_entry *sg)
20960 +{
20961 + union qm_sg_efl u;
20962 +
20963 + u.efl = be32_to_cpu(sg->sgt_efl);
20964 + return u.final;
20965 +}
20966 +static inline u32 qm_sg_entry_get_len(const struct qm_sg_entry *sg)
20967 +{
20968 + union qm_sg_efl u;
20969 +
20970 + u.efl = be32_to_cpu(sg->sgt_efl);
20971 + return u.length;
20972 +}
20973 +static inline u8 qm_sg_entry_get_bpid(const struct qm_sg_entry *sg)
20974 +{
20975 + return sg->bpid;
20976 +}
20977 +static inline u16 qm_sg_entry_get_offset(const struct qm_sg_entry *sg)
20978 +{
20979 + u32 opaque_offset = be16_to_cpu(sg->opaque_offset);
20980 +
20981 + return opaque_offset & 0x1fff;
20982 +}
20983 +
20984 +/* Macro, so we compile better if 'v' isn't always 64-bit */
20985 +#define qm_sg_entry_set64(sg, v) \
20986 + do { \
20987 + struct qm_sg_entry *__sg931 = (sg); \
20988 + __sg931->opaque = cpu_to_be64(v); \
20989 + } while (0)
20990 +#define qm_sg_entry_set_ext(sg, v) \
20991 + do { \
20992 + union qm_sg_efl __u932; \
20993 + __u932.efl = be32_to_cpu((sg)->sgt_efl); \
20994 + __u932.extension = v; \
20995 + (sg)->sgt_efl = cpu_to_be32(__u932.efl); \
20996 + } while (0)
20997 +#define qm_sg_entry_set_final(sg, v) \
20998 + do { \
20999 + union qm_sg_efl __u933; \
21000 + __u933.efl = be32_to_cpu((sg)->sgt_efl); \
21001 + __u933.final = v; \
21002 + (sg)->sgt_efl = cpu_to_be32(__u933.efl); \
21003 + } while (0)
21004 +#define qm_sg_entry_set_len(sg, v) \
21005 + do { \
21006 + union qm_sg_efl __u934; \
21007 + __u934.efl = be32_to_cpu((sg)->sgt_efl); \
21008 + __u934.length = v; \
21009 + (sg)->sgt_efl = cpu_to_be32(__u934.efl); \
21010 + } while (0)
21011 +#define qm_sg_entry_set_bpid(sg, v) \
21012 + do { \
21013 + struct qm_sg_entry *__u935 = (sg); \
21014 + __u935->bpid = v; \
21015 + } while (0)
21016 +#define qm_sg_entry_set_offset(sg, v) \
21017 + do { \
21018 + struct qm_sg_entry *__u936 = (sg); \
21019 + __u936->opaque_offset = cpu_to_be16(v); \
21020 + } while (0)
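A hedged example (editorial, not part of the patch) of combining the setter macros above to fill a two-entry scatter-gather table; the addresses and lengths are assumptions:

/* Sketch: describe two buffers; the second entry carries the 'final' bit. */
static void example_fill_sg_pair(struct qm_sg_entry sgt[2],
				 dma_addr_t a0, u32 l0,
				 dma_addr_t a1, u32 l1)
{
	memset(sgt, 0, 2 * sizeof(*sgt));

	qm_sg_entry_set64(&sgt[0], a0);
	qm_sg_entry_set_len(&sgt[0], l0);
	qm_sg_entry_set_offset(&sgt[0], 0);
	qm_sg_entry_set_bpid(&sgt[0], 0);

	qm_sg_entry_set64(&sgt[1], a1);
	qm_sg_entry_set_len(&sgt[1], l1);
	qm_sg_entry_set_final(&sgt[1], 1);	/* last entry in the table */
}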
21021 +
21022 +/* See 1.5.8.1: "Enqueue Command" */
21023 +struct qm_eqcr_entry {
21024 + u8 __dont_write_directly__verb;
21025 + u8 dca;
21026 + u16 seqnum;
21027 + u32 orp; /* 24-bit */
21028 + u32 fqid; /* 24-bit */
21029 + u32 tag;
21030 + struct qm_fd fd;
21031 + u8 __reserved3[32];
21032 +} __packed;
21033 +#define QM_EQCR_VERB_VBIT 0x80
21034 +#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
21035 +#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
21036 +#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
21037 +#define QM_EQCR_VERB_COLOUR_GREEN 0x00
21038 +#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
21039 +#define QM_EQCR_VERB_COLOUR_RED 0x10
21040 +#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
21041 +#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
21042 +#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
21043 +#define QM_EQCR_DCA_ENABLE 0x80
21044 +#define QM_EQCR_DCA_PARK 0x40
21045 +#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
21046 +#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
21047 +#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
21048 +#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
21049 +#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
21050 +
21051 +/* See 1.5.8.2: "Frame Dequeue Response" */
21052 +struct qm_dqrr_entry {
21053 + u8 verb;
21054 + u8 stat;
21055 + u16 seqnum; /* 15-bit */
21056 + u8 tok;
21057 + u8 __reserved2[3];
21058 + u32 fqid; /* 24-bit */
21059 + u32 contextB;
21060 + struct qm_fd fd;
21061 + u8 __reserved4[32];
21062 +};
21063 +#define QM_DQRR_VERB_VBIT 0x80
21064 +#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
21065 +#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
21066 +#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
21067 +#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
21068 +#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
21069 +#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
21070 +#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
21071 +#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
21072 +
21073 +/* See 1.5.8.3: "ERN Message Response" */
21074 +/* See 1.5.8.4: "FQ State Change Notification" */
21075 +struct qm_mr_entry {
21076 + u8 verb;
21077 + union {
21078 + struct {
21079 + u8 dca;
21080 + u16 seqnum;
21081 + u8 rc; /* Rejection Code */
21082 + u32 orp:24;
21083 + u32 fqid; /* 24-bit */
21084 + u32 tag;
21085 + struct qm_fd fd;
21086 + } __packed ern;
21087 + struct {
21088 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21089 + u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
21090 + u8 __reserved1:3;
21091 + enum qm_dc_portal portal:3;
21092 +#else
21093 + enum qm_dc_portal portal:3;
21094 + u8 __reserved1:3;
21095 + u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
21096 +#endif
21097 + u16 __reserved2;
21098 + u8 rc; /* Rejection Code */
21099 + u32 __reserved3:24;
21100 + u32 fqid; /* 24-bit */
21101 + u32 tag;
21102 + struct qm_fd fd;
21103 + } __packed dcern;
21104 + struct {
21105 + u8 fqs; /* Frame Queue Status */
21106 + u8 __reserved1[6];
21107 + u32 fqid; /* 24-bit */
21108 + u32 contextB;
21109 + u8 __reserved2[16];
21110 + } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
21111 + };
21112 + u8 __reserved2[32];
21113 +} __packed;
21114 +#define QM_MR_VERB_VBIT 0x80
21115 +/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs
21116 + * originating from direct-connect portals ("dcern") use 0x20 as a verb which
21117 + * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from
21118 + * the other MR types by noting if the 0x20 bit is unset. */
21119 +#define QM_MR_VERB_TYPE_MASK 0x27
21120 +#define QM_MR_VERB_DC_ERN 0x20
21121 +#define QM_MR_VERB_FQRN 0x21
21122 +#define QM_MR_VERB_FQRNI 0x22
21123 +#define QM_MR_VERB_FQRL 0x23
21124 +#define QM_MR_VERB_FQPN 0x24
21125 +#define QM_MR_RC_MASK 0xf0 /* contains one of; */
21126 +#define QM_MR_RC_CGR_TAILDROP 0x00
21127 +#define QM_MR_RC_WRED 0x10
21128 +#define QM_MR_RC_ERROR 0x20
21129 +#define QM_MR_RC_ORPWINDOW_EARLY 0x30
21130 +#define QM_MR_RC_ORPWINDOW_LATE 0x40
21131 +#define QM_MR_RC_FQ_TAILDROP 0x50
21132 +#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
21133 +#define QM_MR_RC_ORP_ZERO 0x70
21134 +#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
21135 +#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
21136 +#define QM_MR_DCERN_COLOUR_GREEN 0x00
21137 +#define QM_MR_DCERN_COLOUR_YELLOW 0x01
21138 +#define QM_MR_DCERN_COLOUR_RED 0x02
21139 +#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
21140 +
21141 +/* An identical structure of FQD fields is present in the "Init FQ" command and
21142 + * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
21143 + * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
21144 + * latter has two inlines to assist with converting to/from the mant+exp
21145 + * representation. */
21146 +struct qm_fqd_stashing {
21147 + /* See QM_STASHING_EXCL_<...> */
21148 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21149 + u8 exclusive;
21150 + u8 __reserved1:2;
21151 + /* Numbers of cachelines */
21152 + u8 annotation_cl:2;
21153 + u8 data_cl:2;
21154 + u8 context_cl:2;
21155 +#else
21156 + u8 context_cl:2;
21157 + u8 data_cl:2;
21158 + u8 annotation_cl:2;
21159 + u8 __reserved1:2;
21160 + u8 exclusive;
21161 +#endif
21162 +} __packed;
21163 +struct qm_fqd_taildrop {
21164 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21165 + u16 __reserved1:3;
21166 + u16 mant:8;
21167 + u16 exp:5;
21168 +#else
21169 + u16 exp:5;
21170 + u16 mant:8;
21171 + u16 __reserved1:3;
21172 +#endif
21173 +} __packed;
21174 +struct qm_fqd_oac {
21175 + /* See QM_OAC_<...> */
21176 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21177 + u8 oac:2; /* "Overhead Accounting Control" */
21178 + u8 __reserved1:6;
21179 +#else
21180 + u8 __reserved1:6;
21181 + u8 oac:2; /* "Overhead Accounting Control" */
21182 +#endif
21183 + /* Two's-complement value (-128 to +127) */
21184 + signed char oal; /* "Overhead Accounting Length" */
21185 +} __packed;
21186 +struct qm_fqd {
21187 + union {
21188 + u8 orpc;
21189 + struct {
21190 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21191 + u8 __reserved1:2;
21192 + u8 orprws:3;
21193 + u8 oa:1;
21194 + u8 olws:2;
21195 +#else
21196 + u8 olws:2;
21197 + u8 oa:1;
21198 + u8 orprws:3;
21199 + u8 __reserved1:2;
21200 +#endif
21201 + } __packed;
21202 + };
21203 + u8 cgid;
21204 + u16 fq_ctrl; /* See QM_FQCTRL_<...> */
21205 + union {
21206 + u16 dest_wq;
21207 + struct {
21208 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21209 + u16 channel:13; /* qm_channel */
21210 + u16 wq:3;
21211 +#else
21212 + u16 wq:3;
21213 + u16 channel:13; /* qm_channel */
21214 +#endif
21215 + } __packed dest;
21216 + };
21217 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21218 + u16 __reserved2:1;
21219 + u16 ics_cred:15;
21220 +#else
21221 + u16 __reserved2:1;
21222 + u16 ics_cred:15;
21223 +#endif
21224 + /* For "Initialize Frame Queue" commands, the write-enable mask
21225 + * determines whether 'td' or 'oac_init' is observed. For query
21226 + * commands, this field is always 'td', and 'oac_query' (below) reflects
21227 + * the Overhead ACcounting values. */
21228 + union {
21229 + struct qm_fqd_taildrop td;
21230 + struct qm_fqd_oac oac_init;
21231 + };
21232 + u32 context_b;
21233 + union {
21234 + /* Treat it as 64-bit opaque */
21235 + u64 opaque;
21236 + struct {
21237 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21238 + u32 hi;
21239 + u32 lo;
21240 +#else
21241 + u32 lo;
21242 + u32 hi;
21243 +#endif
21244 + };
21245 + /* Treat it as s/w portal stashing config */
21246 + /* See 1.5.6.7.1: "FQD Context_A field used for [...] */
21247 + struct {
21248 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21249 + struct qm_fqd_stashing stashing;
21250 + /* 48-bit address of FQ context to
21251 + * stash, must be cacheline-aligned */
21252 + u16 context_hi;
21253 + u32 context_lo;
21254 +#else
21255 + u32 context_lo;
21256 + u16 context_hi;
21257 + struct qm_fqd_stashing stashing;
21258 +#endif
21259 + } __packed;
21260 + } context_a;
21261 + struct qm_fqd_oac oac_query;
21262 +} __packed;
21263 +/* 64-bit converters for context_hi/lo */
21264 +static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
21265 +{
21266 + return ((u64)fqd->context_a.context_hi << 32) |
21267 + (u64)fqd->context_a.context_lo;
21268 +}
21269 +static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
21270 +{
21271 + return (dma_addr_t)qm_fqd_stashing_get64(fqd);
21272 +}
21273 +static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
21274 +{
21275 + return ((u64)fqd->context_a.hi << 32) |
21276 + (u64)fqd->context_a.lo;
21277 +}
21278 +/* Macro, so we compile better when 'v' isn't necessarily 64-bit */
21279 +#define qm_fqd_stashing_set64(fqd, v) \
21280 + do { \
21281 + struct qm_fqd *__fqd931 = (fqd); \
21282 + __fqd931->context_a.context_hi = upper_32_bits(v); \
21283 + __fqd931->context_a.context_lo = lower_32_bits(v); \
21284 + } while (0)
21285 +#define qm_fqd_context_a_set64(fqd, v) \
21286 + do { \
21287 + struct qm_fqd *__fqd931 = (fqd); \
21288 + __fqd931->context_a.hi = upper_32_bits(v); \
21289 + __fqd931->context_a.lo = lower_32_bits(v); \
21290 + } while (0)
21291 +/* convert a threshold value into mant+exp representation */
21292 +static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
21293 + int roundup)
21294 +{
21295 + u32 e = 0;
21296 + int oddbit = 0;
21297 + if (val > 0xe0000000)
21298 + return -ERANGE;
21299 + while (val > 0xff) {
21300 + oddbit = val & 1;
21301 + val >>= 1;
21302 + e++;
21303 + if (roundup && oddbit)
21304 + val++;
21305 + }
21306 + td->exp = e;
21307 + td->mant = val;
21308 + return 0;
21309 +}
21310 +/* and the other direction */
21311 +static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
21312 +{
21313 + return (u32)td->mant << td->exp;
21314 +}
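A worked example of the mant+exp conversion implemented just above (editorial note, values chosen for illustration):

/* qm_fqd_taildrop_set(&td, 1000, 1):
 *   1000 > 0xff -> val = 500, e = 1 (dropped bit was 0)
 *    500 > 0xff -> val = 250, e = 2 (dropped bit was 0)
 *   => mant = 250, exp = 2, and qm_fqd_taildrop_get() returns 250 << 2 = 1000.
 * A request of 1001 with roundup=1 encodes as mant = 251, exp = 2, ie. 1004. */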
21315 +
21316 +/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */
21317 +/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
21318 +#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
21319 +#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
21320 +#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
21321 +#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
21322 +#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
21323 +#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
21324 +#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
21325 +#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
21326 +#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
21327 +#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
21328 +#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
21329 +
21330 +/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
21331 +/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
21332 +#define QM_STASHING_EXCL_ANNOTATION 0x04
21333 +#define QM_STASHING_EXCL_DATA 0x02
21334 +#define QM_STASHING_EXCL_CTX 0x01
21335 +
21336 +/* See 1.5.5.3: "Intra Class Scheduling" */
21337 +/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
21338 +#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
21339 +#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
21340 +
21341 +/* See 1.5.8.4: "FQ State Change Notification" */
21342 +/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
21343 + * and associated commands/responses. The WRED parameters are calculated from
21344 + * these fields as follows;
21345 + * MaxTH = MA * (2 ^ Mn)
21346 + * Slope = SA / (2 ^ Sn)
21347 + * MaxP = 4 * (Pn + 1)
21348 + */
21349 +struct qm_cgr_wr_parm {
21350 + union {
21351 + u32 word;
21352 + struct {
21353 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21354 + u32 MA:8;
21355 + u32 Mn:5;
21356 + u32 SA:7; /* must be between 64-127 */
21357 + u32 Sn:6;
21358 + u32 Pn:6;
21359 +#else
21360 + u32 Pn:6;
21361 + u32 Sn:6;
21362 + u32 SA:7; /* must be between 64-127 */
21363 + u32 Mn:5;
21364 + u32 MA:8;
21365 +#endif
21366 + } __packed;
21367 + };
21368 +} __packed;
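For concreteness, an editorial worked example of the formulas in the comment above, using assumed field values:

/* Sketch: one WRED colour parameterised per the documented formulas. */
static void example_wr_parm(struct qm_cgr_wr_parm *p)
{
	p->MA = 64;	p->Mn = 10;	/* MaxTH = 64 * 2^10 = 65536 */
	p->SA = 64;	p->Sn = 8;	/* Slope = 64 / 2^8  = 0.25  */
	p->Pn = 7;			/* MaxP  = 4 * (7+1) = 32    */
}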
21369 +/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
21370 + * management commands, this is padded to a 16-bit structure field, so that's
21371 + * how we represent it here. The congestion state threshold is calculated from
21372 + * these fields as follows;
21373 + * CS threshold = TA * (2 ^ Tn)
21374 + */
21375 +struct qm_cgr_cs_thres {
21376 + union {
21377 + u16 hword;
21378 + struct {
21379 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21380 + u16 __reserved:3;
21381 + u16 TA:8;
21382 + u16 Tn:5;
21383 +#else
21384 + u16 Tn:5;
21385 + u16 TA:8;
21386 + u16 __reserved:3;
21387 +#endif
21388 + } __packed;
21389 + };
21390 +} __packed;
21391 +/* This identical structure of CGR fields is present in the "Init/Modify CGR"
21392 + * commands and the "Query CGR" result. It's suctioned out here into its own
21393 + * struct. */
21394 +struct __qm_mc_cgr {
21395 + struct qm_cgr_wr_parm wr_parm_g;
21396 + struct qm_cgr_wr_parm wr_parm_y;
21397 + struct qm_cgr_wr_parm wr_parm_r;
21398 + u8 wr_en_g; /* boolean, use QM_CGR_EN */
21399 + u8 wr_en_y; /* boolean, use QM_CGR_EN */
21400 + u8 wr_en_r; /* boolean, use QM_CGR_EN */
21401 + u8 cscn_en; /* boolean, use QM_CGR_EN */
21402 + union {
21403 + struct {
21404 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21405 + u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
21406 + u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
21407 +#else
21408 + u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
21409 + u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
21410 +#endif
21411 + };
21412 + u32 cscn_targ; /* use QM_CGR_TARG_* */
21413 + };
21414 + u8 cstd_en; /* boolean, use QM_CGR_EN */
21415 + u8 cs; /* boolean, only used in query response */
21416 + union {
21417 + /* use qm_cgr_cs_thres_set64() */
21418 + struct qm_cgr_cs_thres cs_thres;
21419 + u16 __cs_thres;
21420 + };
21421 + u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
21422 +} __packed;
21423 +#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
21424 +#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
21425 +#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
21426 +#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
21427 +#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
21428 +#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
21429 +/* Convert CGR thresholds to/from "cs_thres" format */
21430 +static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
21431 +{
21432 + return (u64)th->TA << th->Tn;
21433 +}
21434 +static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
21435 + int roundup)
21436 +{
21437 + u32 e = 0;
21438 + int oddbit = 0;
21439 + while (val > 0xff) {
21440 + oddbit = val & 1;
21441 + val >>= 1;
21442 + e++;
21443 + if (roundup && oddbit)
21444 + val++;
21445 + }
21446 + th->Tn = e;
21447 + th->TA = val;
21448 + return 0;
21449 +}
21450 +
21451 +/* See 1.5.8.5.1: "Initialize FQ" */
21452 +/* See 1.5.8.5.2: "Query FQ" */
21453 +/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
21454 +/* See 1.5.8.5.4: "Alter FQ State Commands " */
21455 +/* See 1.5.8.6.1: "Initialize/Modify CGR" */
21456 +/* See 1.5.8.6.2: "CGR Test Write" */
21457 +/* See 1.5.8.6.3: "Query CGR" */
21458 +/* See 1.5.8.6.4: "Query Congestion Group State" */
21459 +struct qm_mcc_initfq {
21460 + u8 __reserved1;
21461 + u16 we_mask; /* Write Enable Mask */
21462 + u32 fqid; /* 24-bit */
21463 + u16 count; /* Initialises 'count+1' FQDs */
21464 + struct qm_fqd fqd; /* the FQD fields go here */
21465 + u8 __reserved3[30];
21466 +} __packed;
21467 +struct qm_mcc_queryfq {
21468 + u8 __reserved1[3];
21469 + u32 fqid; /* 24-bit */
21470 + u8 __reserved2[56];
21471 +} __packed;
21472 +struct qm_mcc_queryfq_np {
21473 + u8 __reserved1[3];
21474 + u32 fqid; /* 24-bit */
21475 + u8 __reserved2[56];
21476 +} __packed;
21477 +struct qm_mcc_alterfq {
21478 + u8 __reserved1[3];
21479 + u32 fqid; /* 24-bit */
21480 + u8 __reserved2;
21481 + u8 count; /* number of consecutive FQID */
21482 + u8 __reserved3[10];
21483 + u32 context_b; /* frame queue context b */
21484 + u8 __reserved4[40];
21485 +} __packed;
21486 +struct qm_mcc_initcgr {
21487 + u8 __reserved1;
21488 + u16 we_mask; /* Write Enable Mask */
21489 + struct __qm_mc_cgr cgr; /* CGR fields */
21490 + u8 __reserved2[2];
21491 + u8 cgid;
21492 + u8 __reserved4[32];
21493 +} __packed;
21494 +struct qm_mcc_cgrtestwrite {
21495 + u8 __reserved1[2];
21496 + u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
21497 + u32 i_bcnt_lo; /* low 32-bits of 40-bit */
21498 + u8 __reserved2[23];
21499 + u8 cgid;
21500 + u8 __reserved3[32];
21501 +} __packed;
21502 +struct qm_mcc_querycgr {
21503 + u8 __reserved1[30];
21504 + u8 cgid;
21505 + u8 __reserved2[32];
21506 +} __packed;
21507 +struct qm_mcc_querycongestion {
21508 + u8 __reserved[63];
21509 +} __packed;
21510 +struct qm_mcc_querywq {
21511 + u8 __reserved;
21512 + /* select channel if verb != QUERYWQ_DEDICATED */
21513 + union {
21514 + u16 channel_wq; /* ignores wq (3 lsbits) */
21515 + struct {
21516 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21517 + u16 id:13; /* qm_channel */
21518 + u16 __reserved1:3;
21519 +#else
21520 + u16 __reserved1:3;
21521 + u16 id:13; /* qm_channel */
21522 +#endif
21523 + } __packed channel;
21524 + };
21525 + u8 __reserved2[60];
21526 +} __packed;
21527 +
21528 +struct qm_mcc_ceetm_lfqmt_config {
21529 + u8 __reserved1[4];
21530 + u32 lfqid:24;
21531 + u8 __reserved2[2];
21532 + u16 cqid;
21533 + u8 __reserved3[2];
21534 + u16 dctidx;
21535 + u8 __reserved4[48];
21536 +} __packed;
21537 +
21538 +struct qm_mcc_ceetm_lfqmt_query {
21539 + u8 __reserved1[4];
21540 + u32 lfqid:24;
21541 + u8 __reserved2[56];
21542 +} __packed;
21543 +
21544 +struct qm_mcc_ceetm_cq_config {
21545 + u8 __reserved1;
21546 + u16 cqid;
21547 + u8 dcpid;
21548 + u8 __reserved2;
21549 + u16 ccgid;
21550 + u8 __reserved3[56];
21551 +} __packed;
21552 +
21553 +struct qm_mcc_ceetm_cq_query {
21554 + u8 __reserved1;
21555 + u16 cqid;
21556 + u8 dcpid;
21557 + u8 __reserved2[59];
21558 +} __packed;
21559 +
21560 +struct qm_mcc_ceetm_dct_config {
21561 + u8 __reserved1;
21562 + u16 dctidx;
21563 + u8 dcpid;
21564 + u8 __reserved2[15];
21565 + u32 context_b;
21566 + u64 context_a;
21567 + u8 __reserved3[32];
21568 +} __packed;
21569 +
21570 +struct qm_mcc_ceetm_dct_query {
21571 + u8 __reserved1;
21572 + u16 dctidx;
21573 + u8 dcpid;
21574 + u8 __reserved2[59];
21575 +} __packed;
21576 +
21577 +struct qm_mcc_ceetm_class_scheduler_config {
21578 + u8 __reserved1;
21579 + u16 cqcid;
21580 + u8 dcpid;
21581 + u8 __reserved2[6];
21582 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21583 + u8 gpc_reserved:1;
21584 + u8 gpc_combine_flag:1;
21585 + u8 gpc_prio_b:3;
21586 + u8 gpc_prio_a:3;
21587 +#else
21588 + u8 gpc_prio_a:3;
21589 + u8 gpc_prio_b:3;
21590 + u8 gpc_combine_flag:1;
21591 + u8 gpc_reserved:1;
21592 +#endif
21593 + u16 crem;
21594 + u16 erem;
21595 + u8 w[8];
21596 + u8 __reserved3[40];
21597 +} __packed;
21598 +
21599 +struct qm_mcc_ceetm_class_scheduler_query {
21600 + u8 __reserved1;
21601 + u16 cqcid;
21602 + u8 dcpid;
21603 + u8 __reserved2[59];
21604 +} __packed;
21605 +
21606 +#define CEETM_COMMAND_CHANNEL_MAPPING (0 << 12)
21607 +#define CEETM_COMMAND_SP_MAPPING (1 << 12)
21608 +#define CEETM_COMMAND_CHANNEL_SHAPER (2 << 12)
21609 +#define CEETM_COMMAND_LNI_SHAPER (3 << 12)
21610 +#define CEETM_COMMAND_TCFC (4 << 12)
21611 +
21612 +#define CEETM_CCGRID_MASK 0x01FF
21613 +#define CEETM_CCGR_CM_CONFIGURE (0 << 14)
21614 +#define CEETM_CCGR_DN_CONFIGURE (1 << 14)
21615 +#define CEETM_CCGR_TEST_WRITE (2 << 14)
21616 +#define CEETM_CCGR_CM_QUERY (0 << 14)
21617 +#define CEETM_CCGR_DN_QUERY (1 << 14)
21618 +#define CEETM_CCGR_DN_QUERY_FLUSH (2 << 14)
21619 +#define CEETM_QUERY_CONGESTION_STATE (3 << 14)
21620 +
21621 +struct qm_mcc_ceetm_mapping_shaper_tcfc_config {
21622 + u8 __reserved1;
21623 + u16 cid;
21624 + u8 dcpid;
21625 + union {
21626 + struct {
21627 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21628 + u8 map_shaped:1;
21629 + u8 map_reserved:4;
21630 + u8 map_lni_id:3;
21631 +#else
21632 + u8 map_lni_id:3;
21633 + u8 map_reserved:4;
21634 + u8 map_shaped:1;
21635 +#endif
21636 + u8 __reserved2[58];
21637 + } __packed channel_mapping;
21638 + struct {
21639 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21640 + u8 map_reserved:5;
21641 + u8 map_lni_id:3;
21642 +#else
21643 + u8 map_lni_id:3;
21644 + u8 map_reserved:5;
21645 +#endif
21646 + u8 __reserved2[58];
21647 + } __packed sp_mapping;
21648 + struct {
21649 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21650 + u8 cpl:1;
21651 + u8 cpl_reserved:2;
21652 + u8 oal:5;
21653 +#else
21654 + u8 oal:5;
21655 + u8 cpl_reserved:2;
21656 + u8 cpl:1;
21657 +#endif
21658 + u32 crtcr:24;
21659 + u32 ertcr:24;
21660 + u16 crtbl;
21661 + u16 ertbl;
21662 + u8 mps; /* This will be hardcoded by driver with 60 */
21663 + u8 __reserved2[47];
21664 + } __packed shaper_config;
21665 + struct {
21666 + u8 __reserved2[11];
21667 + u64 lnitcfcc;
21668 + u8 __reserved3[40];
21669 + } __packed tcfc_config;
21670 + };
21671 +} __packed;
21672 +
21673 +struct qm_mcc_ceetm_mapping_shaper_tcfc_query {
21674 + u8 __reserved1;
21675 + u16 cid;
21676 + u8 dcpid;
21677 + u8 __reserved2[59];
21678 +} __packed;
21679 +
21680 +struct qm_mcc_ceetm_ccgr_config {
21681 + u8 __reserved1;
21682 + u16 ccgrid;
21683 + u8 dcpid;
21684 + u8 __reserved2;
21685 + u16 we_mask;
21686 + union {
21687 + struct {
21688 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21689 + u8 ctl_reserved:1;
21690 + u8 ctl_wr_en_g:1;
21691 + u8 ctl_wr_en_y:1;
21692 + u8 ctl_wr_en_r:1;
21693 + u8 ctl_td_en:1;
21694 + u8 ctl_td_mode:1;
21695 + u8 ctl_cscn_en:1;
21696 + u8 ctl_mode:1;
21697 +#else
21698 + u8 ctl_mode:1;
21699 + u8 ctl_cscn_en:1;
21700 + u8 ctl_td_mode:1;
21701 + u8 ctl_td_en:1;
21702 + u8 ctl_wr_en_r:1;
21703 + u8 ctl_wr_en_y:1;
21704 + u8 ctl_wr_en_g:1;
21705 + u8 ctl_reserved:1;
21706 +#endif
21707 + u8 cdv;
21708 + u16 cscn_tupd;
21709 + u8 oal;
21710 + u8 __reserved3;
21711 + struct qm_cgr_cs_thres cs_thres;
21712 + struct qm_cgr_cs_thres cs_thres_x;
21713 + struct qm_cgr_cs_thres td_thres;
21714 + struct qm_cgr_wr_parm wr_parm_g;
21715 + struct qm_cgr_wr_parm wr_parm_y;
21716 + struct qm_cgr_wr_parm wr_parm_r;
21717 + } __packed cm_config;
21718 + struct {
21719 + u8 dnc;
21720 + u8 dn0;
21721 + u8 dn1;
21722 + u64 dnba:40;
21723 + u8 __reserved3[2];
21724 + u16 dnth_0;
21725 + u8 __reserved4[2];
21726 + u16 dnth_1;
21727 + u8 __reserved5[8];
21728 + } __packed dn_config;
21729 + struct {
21730 + u8 __reserved3[3];
21731 + u64 i_cnt:40;
21732 + u8 __reserved4[16];
21733 + } __packed test_write;
21734 + };
21735 + u8 __reserved5[32];
21736 +} __packed;
21737 +
21738 +struct qm_mcc_ceetm_ccgr_query {
21739 + u8 __reserved1;
21740 + u16 ccgrid;
21741 + u8 dcpid;
21742 + u8 __reserved2[59];
21743 +} __packed;
21744 +
21745 +struct qm_mcc_ceetm_cq_peek_pop_xsfdrread {
21746 + u8 __reserved1;
21747 + u16 cqid;
21748 + u8 dcpid;
21749 + u8 ct;
21750 + u16 xsfdr;
21751 + u8 __reserved2[56];
21752 +} __packed;
21753 +
21754 +#define CEETM_QUERY_DEQUEUE_STATISTICS 0x00
21755 +#define CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS 0x01
21756 +#define CEETM_WRITE_DEQUEUE_STATISTICS 0x02
21757 +#define CEETM_QUERY_REJECT_STATISTICS 0x03
21758 +#define CEETM_QUERY_REJECT_CLEAR_STATISTICS 0x04
21759 +#define CEETM_WRITE_REJECT_STATISTICS 0x05
21760 +struct qm_mcc_ceetm_statistics_query_write {
21761 + u8 __reserved1;
21762 + u16 cid;
21763 + u8 dcpid;
21764 + u8 ct;
21765 + u8 __reserved2[13];
21766 + u64 frm_cnt:40;
21767 + u8 __reserved3[2];
21768 + u64 byte_cnt:48;
21769 + u8 __reserved[32];
21770 +} __packed;
21771 +
21772 +struct qm_mc_command {
21773 + u8 __dont_write_directly__verb;
21774 + union {
21775 + struct qm_mcc_initfq initfq;
21776 + struct qm_mcc_queryfq queryfq;
21777 + struct qm_mcc_queryfq_np queryfq_np;
21778 + struct qm_mcc_alterfq alterfq;
21779 + struct qm_mcc_initcgr initcgr;
21780 + struct qm_mcc_cgrtestwrite cgrtestwrite;
21781 + struct qm_mcc_querycgr querycgr;
21782 + struct qm_mcc_querycongestion querycongestion;
21783 + struct qm_mcc_querywq querywq;
21784 + struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
21785 + struct qm_mcc_ceetm_lfqmt_query lfqmt_query;
21786 + struct qm_mcc_ceetm_cq_config cq_config;
21787 + struct qm_mcc_ceetm_cq_query cq_query;
21788 + struct qm_mcc_ceetm_dct_config dct_config;
21789 + struct qm_mcc_ceetm_dct_query dct_query;
21790 + struct qm_mcc_ceetm_class_scheduler_config csch_config;
21791 + struct qm_mcc_ceetm_class_scheduler_query csch_query;
21792 + struct qm_mcc_ceetm_mapping_shaper_tcfc_config mst_config;
21793 + struct qm_mcc_ceetm_mapping_shaper_tcfc_query mst_query;
21794 + struct qm_mcc_ceetm_ccgr_config ccgr_config;
21795 + struct qm_mcc_ceetm_ccgr_query ccgr_query;
21796 + struct qm_mcc_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
21797 + struct qm_mcc_ceetm_statistics_query_write stats_query_write;
21798 + };
21799 +} __packed;
21800 +#define QM_MCC_VERB_VBIT 0x80
21801 +#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
21802 +#define QM_MCC_VERB_INITFQ_PARKED 0x40
21803 +#define QM_MCC_VERB_INITFQ_SCHED 0x41
21804 +#define QM_MCC_VERB_QUERYFQ 0x44
21805 +#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
21806 +#define QM_MCC_VERB_QUERYWQ 0x46
21807 +#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
21808 +#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
21809 +#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
21810 +#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
21811 +#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
21812 +#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
21813 +#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
21814 +#define QM_MCC_VERB_INITCGR 0x50
21815 +#define QM_MCC_VERB_MODIFYCGR 0x51
21816 +#define QM_MCC_VERB_CGRTESTWRITE 0x52
21817 +#define QM_MCC_VERB_QUERYCGR 0x58
21818 +#define QM_MCC_VERB_QUERYCONGESTION 0x59
21819 +/* INITFQ-specific flags */
21820 +#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
21821 +#define QM_INITFQ_WE_OAC 0x0100
21822 +#define QM_INITFQ_WE_ORPC 0x0080
21823 +#define QM_INITFQ_WE_CGID 0x0040
21824 +#define QM_INITFQ_WE_FQCTRL 0x0020
21825 +#define QM_INITFQ_WE_DESTWQ 0x0010
21826 +#define QM_INITFQ_WE_ICSCRED 0x0008
21827 +#define QM_INITFQ_WE_TDTHRESH 0x0004
21828 +#define QM_INITFQ_WE_CONTEXTB 0x0002
21829 +#define QM_INITFQ_WE_CONTEXTA 0x0001
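An editorial sketch of how the write-enable flags above gate which FQD fields an Init FQ command observes; the channel value and the chosen flags are assumptions, and the FQID is left for the caller:

/* Sketch: enable only the destination work queue and fq_ctrl fields. */
static void example_prep_initfq(struct qm_mcc_initfq *initfq, u16 channel)
{
	memset(initfq, 0, sizeof(*initfq));
	initfq->we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
	initfq->fqd.dest.channel = channel;
	initfq->fqd.dest.wq = 3;			/* work queue within the channel */
	initfq->fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;	/* aggressively cache FQD */
}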
21830 +/* INITCGR/MODIFYCGR-specific flags */
21831 +#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
21832 +#define QM_CGR_WE_WR_PARM_G 0x0400
21833 +#define QM_CGR_WE_WR_PARM_Y 0x0200
21834 +#define QM_CGR_WE_WR_PARM_R 0x0100
21835 +#define QM_CGR_WE_WR_EN_G 0x0080
21836 +#define QM_CGR_WE_WR_EN_Y 0x0040
21837 +#define QM_CGR_WE_WR_EN_R 0x0020
21838 +#define QM_CGR_WE_CSCN_EN 0x0010
21839 +#define QM_CGR_WE_CSCN_TARG 0x0008
21840 +#define QM_CGR_WE_CSTD_EN 0x0004
21841 +#define QM_CGR_WE_CS_THRES 0x0002
21842 +#define QM_CGR_WE_MODE 0x0001
21843 +
21844 +/* See 1.5.9.7 CEETM Management Commands */
21845 +#define QM_CEETM_VERB_LFQMT_CONFIG 0x70
21846 +#define QM_CEETM_VERB_LFQMT_QUERY 0x71
21847 +#define QM_CEETM_VERB_CQ_CONFIG 0x72
21848 +#define QM_CEETM_VERB_CQ_QUERY 0x73
21849 +#define QM_CEETM_VERB_DCT_CONFIG 0x74
21850 +#define QM_CEETM_VERB_DCT_QUERY 0x75
21851 +#define QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG 0x76
21852 +#define QM_CEETM_VERB_CLASS_SCHEDULER_QUERY 0x77
21853 +#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG 0x78
21854 +#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY 0x79
21855 +#define QM_CEETM_VERB_CCGR_CONFIG 0x7A
21856 +#define QM_CEETM_VERB_CCGR_QUERY 0x7B
21857 +#define QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD 0x7C
21858 +#define QM_CEETM_VERB_STATISTICS_QUERY_WRITE 0x7D
21859 +
21860 +/* See 1.5.8.5.1: "Initialize FQ" */
21861 +/* See 1.5.8.5.2: "Query FQ" */
21862 +/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
21863 +/* See 1.5.8.5.4: "Alter FQ State Commands " */
21864 +/* See 1.5.8.6.1: "Initialize/Modify CGR" */
21865 +/* See 1.5.8.6.2: "CGR Test Write" */
21866 +/* See 1.5.8.6.3: "Query CGR" */
21867 +/* See 1.5.8.6.4: "Query Congestion Group State" */
21868 +struct qm_mcr_initfq {
21869 + u8 __reserved1[62];
21870 +} __packed;
21871 +struct qm_mcr_queryfq {
21872 + u8 __reserved1[8];
21873 + struct qm_fqd fqd; /* the FQD fields are here */
21874 + u8 __reserved2[30];
21875 +} __packed;
21876 +struct qm_mcr_queryfq_np {
21877 + u8 __reserved1;
21878 + u8 state; /* QM_MCR_NP_STATE_*** */
21879 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21880 + u8 __reserved2;
21881 + u32 fqd_link:24;
21882 + u16 __reserved3:2;
21883 + u16 odp_seq:14;
21884 + u16 __reserved4:2;
21885 + u16 orp_nesn:14;
21886 + u16 __reserved5:1;
21887 + u16 orp_ea_hseq:15;
21888 + u16 __reserved6:1;
21889 + u16 orp_ea_tseq:15;
21890 + u8 __reserved7;
21891 + u32 orp_ea_hptr:24;
21892 + u8 __reserved8;
21893 + u32 orp_ea_tptr:24;
21894 + u8 __reserved9;
21895 + u32 pfdr_hptr:24;
21896 + u8 __reserved10;
21897 + u32 pfdr_tptr:24;
21898 + u8 __reserved11[5];
21899 + u8 __reserved12:7;
21900 + u8 is:1;
21901 + u16 ics_surp;
21902 + u32 byte_cnt;
21903 + u8 __reserved13;
21904 + u32 frm_cnt:24;
21905 + u32 __reserved14;
21906 + u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
21907 + u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
21908 + u16 __reserved15;
21909 + u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
21910 + u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
21911 + u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
21912 +#else
21913 + u8 __reserved2;
21914 + u32 fqd_link:24;
21915 +
21916 + u16 odp_seq:14;
21917 + u16 __reserved3:2;
21918 +
21919 + u16 orp_nesn:14;
21920 + u16 __reserved4:2;
21921 +
21922 + u16 orp_ea_hseq:15;
21923 + u16 __reserved5:1;
21924 +
21925 + u16 orp_ea_tseq:15;
21926 + u16 __reserved6:1;
21927 +
21928 + u8 __reserved7;
21929 + u32 orp_ea_hptr:24;
21930 +
21931 + u8 __reserved8;
21932 + u32 orp_ea_tptr:24;
21933 +
21934 + u8 __reserved9;
21935 + u32 pfdr_hptr:24;
21936 +
21937 + u8 __reserved10;
21938 + u32 pfdr_tptr:24;
21939 +
21940 + u8 __reserved11[5];
21941 + u8 is:1;
21942 + u8 __reserved12:7;
21943 + u16 ics_surp;
21944 + u32 byte_cnt;
21945 + u8 __reserved13;
21946 + u32 frm_cnt:24;
21947 + u32 __reserved14;
21948 + u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
21949 + u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
21950 + u16 __reserved15;
21951 + u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
21952 + u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
21953 + u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
21954 +#endif
21955 +} __packed;
21956 +
21957 +
21958 +struct qm_mcr_alterfq {
21959 + u8 fqs; /* Frame Queue Status */
21960 + u8 __reserved1[61];
21961 +} __packed;
21962 +struct qm_mcr_initcgr {
21963 + u8 __reserved1[62];
21964 +} __packed;
21965 +struct qm_mcr_cgrtestwrite {
21966 + u16 __reserved1;
21967 + struct __qm_mc_cgr cgr; /* CGR fields */
21968 + u8 __reserved2[3];
21969 + u32 __reserved3:24;
21970 + u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
21971 + u32 i_bcnt_lo; /* low 32-bits of 40-bit */
21972 + u32 __reserved4:24;
21973 + u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
21974 + u32 a_bcnt_lo; /* low 32-bits of 40-bit */
21975 + u16 lgt; /* Last Group Tick */
21976 + u16 wr_prob_g;
21977 + u16 wr_prob_y;
21978 + u16 wr_prob_r;
21979 + u8 __reserved5[8];
21980 +} __packed;
21981 +struct qm_mcr_querycgr {
21982 + u16 __reserved1;
21983 + struct __qm_mc_cgr cgr; /* CGR fields */
21984 + u8 __reserved2[3];
21985 + union {
21986 + struct {
21987 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21988 + u32 __reserved3:24;
21989 + u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
21990 + u32 i_bcnt_lo; /* low 32-bits of 40-bit */
21991 +#else
21992 + u32 i_bcnt_lo; /* low 32-bits of 40-bit */
21993 + u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
21994 + u32 __reserved3:24;
21995 +#endif
21996 + };
21997 + u64 i_bcnt;
21998 + };
21999 + union {
22000 + struct {
22001 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
22002 + u32 __reserved4:24;
22003 + u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
22004 + u32 a_bcnt_lo; /* low 32-bits of 40-bit */
22005 +#else
22006 + u32 a_bcnt_lo; /* low 32-bits of 40-bit */
22007 + u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
22008 + u32 __reserved4:24;
22009 +#endif
22010 + };
22011 + u64 a_bcnt;
22012 + };
22013 + union {
22014 + u32 cscn_targ_swp[4];
22015 + u8 __reserved5[16];
22016 + };
22017 +} __packed;
22018 +static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
22019 +{
22020 + return be64_to_cpu(q->i_bcnt);
22021 +}
22022 +static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
22023 +{
22024 + return be64_to_cpu(q->a_bcnt);
22025 +}
22026 +static inline u64 qm_mcr_cgrtestwrite_i_get64(
22027 + const struct qm_mcr_cgrtestwrite *q)
22028 +{
22029 + return be64_to_cpu(((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo);
22030 +}
22031 +static inline u64 qm_mcr_cgrtestwrite_a_get64(
22032 + const struct qm_mcr_cgrtestwrite *q)
22033 +{
22034 + return be64_to_cpu(((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo);
22035 +}
22036 +/* Macro, so we compile better if 'v' isn't always 64-bit */
22037 +#define qm_mcr_querycgr_i_set64(q, v) \
22038 + do { \
22039 +		struct qm_mcr_querycgr *__q931 = (q); \
22040 + __q931->i_bcnt_hi = upper_32_bits(v); \
22041 + __q931->i_bcnt_lo = lower_32_bits(v); \
22042 + } while (0)
22043 +#define qm_mcr_querycgr_a_set64(q, v) \
22044 + do { \
22045 +		struct qm_mcr_querycgr *__q931 = (q); \
22046 + __q931->a_bcnt_hi = upper_32_bits(v); \
22047 + __q931->a_bcnt_lo = lower_32_bits(v); \
22048 + } while (0)
22049 +struct __qm_mcr_querycongestion {
22050 + u32 __state[8];
22051 +};
22052 +struct qm_mcr_querycongestion {
22053 + u8 __reserved[30];
22054 + /* Access this struct using QM_MCR_QUERYCONGESTION() */
22055 + struct __qm_mcr_querycongestion state;
22056 +} __packed;
22057 +struct qm_mcr_querywq {
22058 + union {
22059 + u16 channel_wq; /* ignores wq (3 lsbits) */
22060 + struct {
22061 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
22062 + u16 id:13; /* qm_channel */
22063 + u16 __reserved:3;
22064 +#else
22065 + u16 __reserved:3;
22066 + u16 id:13; /* qm_channel */
22067 +#endif
22068 + } __packed channel;
22069 + };
22070 + u8 __reserved[28];
22071 + u32 wq_len[8];
22072 +} __packed;
22073 +
22074 +/* QMAN CEETM Management Command Response */
22075 +struct qm_mcr_ceetm_lfqmt_config {
22076 + u8 __reserved1[62];
22077 +} __packed;
22078 +struct qm_mcr_ceetm_lfqmt_query {
22079 + u8 __reserved1[8];
22080 + u16 cqid;
22081 + u8 __reserved2[2];
22082 + u16 dctidx;
22083 + u8 __reserved3[2];
22084 + u16 ccgid;
22085 + u8 __reserved4[44];
22086 +} __packed;
22087 +
22088 +struct qm_mcr_ceetm_cq_config {
22089 + u8 __reserved1[62];
22090 +} __packed;
22091 +
22092 +struct qm_mcr_ceetm_cq_query {
22093 + u8 __reserved1[4];
22094 + u16 ccgid;
22095 + u16 state;
22096 + u32 pfdr_hptr:24;
22097 + u32 pfdr_tptr:24;
22098 + u16 od1_xsfdr;
22099 + u16 od2_xsfdr;
22100 + u16 od3_xsfdr;
22101 + u16 od4_xsfdr;
22102 + u16 od5_xsfdr;
22103 + u16 od6_xsfdr;
22104 + u16 ra1_xsfdr;
22105 + u16 ra2_xsfdr;
22106 + u8 __reserved2;
22107 + u32 frm_cnt:24;
22108 + u8 __reserved333[28];
22109 +} __packed;
22110 +
22111 +struct qm_mcr_ceetm_dct_config {
22112 + u8 __reserved1[62];
22113 +} __packed;
22114 +
22115 +struct qm_mcr_ceetm_dct_query {
22116 + u8 __reserved1[18];
22117 + u32 context_b;
22118 + u64 context_a;
22119 + u8 __reserved2[32];
22120 +} __packed;
22121 +
22122 +struct qm_mcr_ceetm_class_scheduler_config {
22123 + u8 __reserved1[62];
22124 +} __packed;
22125 +
22126 +struct qm_mcr_ceetm_class_scheduler_query {
22127 + u8 __reserved1[9];
22128 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
22129 + u8 gpc_reserved:1;
22130 + u8 gpc_combine_flag:1;
22131 + u8 gpc_prio_b:3;
22132 + u8 gpc_prio_a:3;
22133 +#else
22134 + u8 gpc_prio_a:3;
22135 + u8 gpc_prio_b:3;
22136 + u8 gpc_combine_flag:1;
22137 + u8 gpc_reserved:1;
22138 +#endif
22139 + u16 crem;
22140 + u16 erem;
22141 + u8 w[8];
22142 + u8 __reserved2[5];
22143 + u32 wbfslist:24;
22144 + u32 d8;
22145 + u32 d9;
22146 + u32 d10;
22147 + u32 d11;
22148 + u32 d12;
22149 + u32 d13;
22150 + u32 d14;
22151 + u32 d15;
22152 +} __packed;
22153 +
22154 +struct qm_mcr_ceetm_mapping_shaper_tcfc_config {
22155 + u16 cid;
22156 + u8 __reserved2[60];
22157 +} __packed;
22158 +
22159 +struct qm_mcr_ceetm_mapping_shaper_tcfc_query {
22160 + u16 cid;
22161 + u8 __reserved1;
22162 + union {
22163 + struct {
22164 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
22165 + u8 map_shaped:1;
22166 + u8 map_reserved:4;
22167 + u8 map_lni_id:3;
22168 +#else
22169 + u8 map_lni_id:3;
22170 + u8 map_reserved:4;
22171 + u8 map_shaped:1;
22172 +#endif
22173 + u8 __reserved2[58];
22174 + } __packed channel_mapping_query;
22175 + struct {
22176 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
22177 + u8 map_reserved:5;
22178 + u8 map_lni_id:3;
22179 +#else
22180 + u8 map_lni_id:3;
22181 + u8 map_reserved:5;
22182 +#endif
22183 + u8 __reserved2[58];
22184 + } __packed sp_mapping_query;
22185 + struct {
22186 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
22187 + u8 cpl:1;
22188 + u8 cpl_reserved:2;
22189 + u8 oal:5;
22190 +#else
22191 + u8 oal:5;
22192 + u8 cpl_reserved:2;
22193 + u8 cpl:1;
22194 +#endif
22195 + u32 crtcr:24;
22196 + u32 ertcr:24;
22197 + u16 crtbl;
22198 + u16 ertbl;
22199 + u8 mps;
22200 + u8 __reserved2[15];
22201 + u32 crat;
22202 + u32 erat;
22203 + u8 __reserved3[24];
22204 + } __packed shaper_query;
22205 + struct {
22206 + u8 __reserved1[11];
22207 + u64 lnitcfcc;
22208 + u8 __reserved3[40];
22209 + } __packed tcfc_query;
22210 + };
22211 +} __packed;
22212 +
22213 +struct qm_mcr_ceetm_ccgr_config {
22214 + u8 __reserved1[46];
22215 + union {
22216 + u8 __reserved2[8];
22217 + struct {
22218 + u16 timestamp;
22219 + u16 wr_porb_g;
22220 + u16 wr_prob_y;
22221 + u16 wr_prob_r;
22222 + } __packed test_write;
22223 + };
22224 + u8 __reserved3[8];
22225 +} __packed;
22226 +
22227 +struct qm_mcr_ceetm_ccgr_query {
22228 + u8 __reserved1[6];
22229 + union {
22230 + struct {
22231 +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
22232 + u8 ctl_reserved:1;
22233 + u8 ctl_wr_en_g:1;
22234 + u8 ctl_wr_en_y:1;
22235 + u8 ctl_wr_en_r:1;
22236 + u8 ctl_td_en:1;
22237 + u8 ctl_td_mode:1;
22238 + u8 ctl_cscn_en:1;
22239 + u8 ctl_mode:1;
22240 +#else
22241 + u8 ctl_mode:1;
22242 + u8 ctl_cscn_en:1;
22243 + u8 ctl_td_mode:1;
22244 + u8 ctl_td_en:1;
22245 + u8 ctl_wr_en_r:1;
22246 + u8 ctl_wr_en_y:1;
22247 + u8 ctl_wr_en_g:1;
22248 + u8 ctl_reserved:1;
22249 +#endif
22250 + u8 cdv;
22251 + u8 __reserved2[2];
22252 + u8 oal;
22253 + u8 __reserved3;
22254 + struct qm_cgr_cs_thres cs_thres;
22255 + struct qm_cgr_cs_thres cs_thres_x;
22256 + struct qm_cgr_cs_thres td_thres;
22257 + struct qm_cgr_wr_parm wr_parm_g;
22258 + struct qm_cgr_wr_parm wr_parm_y;
22259 + struct qm_cgr_wr_parm wr_parm_r;
22260 + u16 cscn_targ_dcp;
22261 + u8 dcp_lsn;
22262 + u64 i_cnt:40;
22263 + u8 __reserved4[3];
22264 + u64 a_cnt:40;
22265 + u32 cscn_targ_swp[4];
22266 + } __packed cm_query;
22267 + struct {
22268 + u8 dnc;
22269 + u8 dn0;
22270 + u8 dn1;
22271 + u64 dnba:40;
22272 + u8 __reserved2[2];
22273 + u16 dnth_0;
22274 + u8 __reserved3[2];
22275 + u16 dnth_1;
22276 + u8 __reserved4[10];
22277 + u16 dnacc_0;
22278 + u8 __reserved5[2];
22279 + u16 dnacc_1;
22280 + u8 __reserved6[24];
22281 + } __packed dn_query;
22282 + struct {
22283 + u8 __reserved2[24];
22284 + struct __qm_mcr_querycongestion state;
22285 + } __packed congestion_state;
22286 +
22287 + };
22288 +} __packed;
22289 +
22290 +struct qm_mcr_ceetm_cq_peek_pop_xsfdrread {
22291 + u8 stat;
22292 + u8 __reserved1[11];
22293 + u16 dctidx;
22294 + struct qm_fd fd;
22295 + u8 __reserved2[32];
22296 +} __packed;
22297 +
22298 +struct qm_mcr_ceetm_statistics_query {
22299 + u8 __reserved1[17];
22300 + u64 frm_cnt:40;
22301 + u8 __reserved2[2];
22302 + u64 byte_cnt:48;
22303 + u8 __reserved3[32];
22304 +} __packed;
22305 +
22306 +struct qm_mc_result {
22307 + u8 verb;
22308 + u8 result;
22309 + union {
22310 + struct qm_mcr_initfq initfq;
22311 + struct qm_mcr_queryfq queryfq;
22312 + struct qm_mcr_queryfq_np queryfq_np;
22313 + struct qm_mcr_alterfq alterfq;
22314 + struct qm_mcr_initcgr initcgr;
22315 + struct qm_mcr_cgrtestwrite cgrtestwrite;
22316 + struct qm_mcr_querycgr querycgr;
22317 + struct qm_mcr_querycongestion querycongestion;
22318 + struct qm_mcr_querywq querywq;
22319 + struct qm_mcr_ceetm_lfqmt_config lfqmt_config;
22320 + struct qm_mcr_ceetm_lfqmt_query lfqmt_query;
22321 + struct qm_mcr_ceetm_cq_config cq_config;
22322 + struct qm_mcr_ceetm_cq_query cq_query;
22323 + struct qm_mcr_ceetm_dct_config dct_config;
22324 + struct qm_mcr_ceetm_dct_query dct_query;
22325 + struct qm_mcr_ceetm_class_scheduler_config csch_config;
22326 + struct qm_mcr_ceetm_class_scheduler_query csch_query;
22327 + struct qm_mcr_ceetm_mapping_shaper_tcfc_config mst_config;
22328 + struct qm_mcr_ceetm_mapping_shaper_tcfc_query mst_query;
22329 + struct qm_mcr_ceetm_ccgr_config ccgr_config;
22330 + struct qm_mcr_ceetm_ccgr_query ccgr_query;
22331 + struct qm_mcr_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
22332 + struct qm_mcr_ceetm_statistics_query stats_query;
22333 + };
22334 +} __packed;
22335 +
22336 +#define QM_MCR_VERB_RRID 0x80
22337 +#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
22338 +#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
22339 +#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
22340 +#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
22341 +#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
22342 +#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
22343 +#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
22344 +#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
22345 +#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
22346 +#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
22347 +#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
22348 +#define QM_MCR_RESULT_NULL 0x00
22349 +#define QM_MCR_RESULT_OK 0xf0
22350 +#define QM_MCR_RESULT_ERR_FQID 0xf1
22351 +#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
22352 +#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
22353 +#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
22354 +#define QM_MCR_RESULT_PENDING 0xf8
22355 +#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
22356 +#define QM_MCR_NP_STATE_FE 0x10
22357 +#define QM_MCR_NP_STATE_R 0x08
22358 +#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
22359 +#define QM_MCR_NP_STATE_OOS 0x00
22360 +#define QM_MCR_NP_STATE_RETIRED 0x01
22361 +#define QM_MCR_NP_STATE_TEN_SCHED 0x02
22362 +#define QM_MCR_NP_STATE_TRU_SCHED 0x03
22363 +#define QM_MCR_NP_STATE_PARKED 0x04
22364 +#define QM_MCR_NP_STATE_ACTIVE 0x05
22365 +#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
22366 +#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
22367 +#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
22368 +#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
22369 +#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
22370 +#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
22371 +#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
22372 +/* This extracts the state for congestion group 'n' from a query response.
22373 + * Eg.
22374 + * u8 cgr = [...];
22375 + * struct qm_mc_result *res = [...];
22376 + * printf("congestion group %d congestion state: %d\n", cgr,
22377 + * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
22378 + */
22379 +#define __CGR_WORD(num) (num >> 5)
22380 +#define __CGR_SHIFT(num) (num & 0x1f)
22381 +#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
22382 +static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
22383 + u8 cgr)
22384 +{
22385 + return p->__state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
22386 +}
22387 +
22388 +
22389 +/*********************/
22390 +/* Utility interface */
22391 +/*********************/
22392 +
22393 +/* Represents an allocator over a range of FQIDs. NB, accesses are not locked,
22394 + * spinlock them yourself if needed. */
22395 +struct qman_fqid_pool;
22396 +
22397 +/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy()
22398 + * always succeeds, but returns non-zero if there were "leaked" FQID
22399 + * allocations. */
22400 +struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num);
22401 +int qman_fqid_pool_destroy(struct qman_fqid_pool *pool);
22402 +/* Alloc/free a FQID from the range. _alloc() returns zero for success. */
22403 +int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid);
22404 +void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid);
22405 +u32 qman_fqid_pool_used(struct qman_fqid_pool *pool);
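+
+/* Illustrative sketch (all values arbitrary): carve a pool of 64 FQIDs
+ * starting at 0x100, take one out and hand it back. As noted above, callers
+ * must provide their own locking around these calls if they need it;
+ *
+ * struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 64);
+ * u32 fqid;
+ * if (pool && !qman_fqid_pool_alloc(pool, &fqid)) {
+ *     // ... use 'fqid' ...
+ *     qman_fqid_pool_free(pool, fqid);
+ * }
+ * if (pool)
+ *     qman_fqid_pool_destroy(pool);
+ */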
22406 +
22407 +/*******************************************************************/
22408 +/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
22409 +/*******************************************************************/
22410 +
22411 + /* Portal and Frame Queues */
22412 + /* ----------------------- */
22413 +/* Represents a managed portal */
22414 +struct qman_portal;
22415 +
22416 +/* This object type represents Qman frame queue descriptors (FQD), it is
22417 + * cacheline-aligned, and initialised by qman_create_fq(). The structure is
22418 + * defined further down. */
22419 +struct qman_fq;
22420 +
22421 +/* This object type represents a Qman congestion group, it is defined further
22422 + * down. */
22423 +struct qman_cgr;
22424 +
22425 +struct qman_portal_config {
22426 + /* If the caller enables DQRR stashing (and thus wishes to operate the
22427 + * portal from only one cpu), this is the logical CPU that the portal
22428 + * will stash to. Whether stashing is enabled or not, this setting is
22429 + * also used for any "core-affine" portals, ie. default portals
22430 + * associated to the corresponding cpu. -1 implies that there is no core
22431 + * affinity configured. */
22432 + int cpu;
22433 + /* portal interrupt line */
22434 + int irq;
22435 + /* the unique index of this portal */
22436 + u32 index;
22437 + /* Is this portal shared? (If so, it has coarser locking and demuxes
22438 + * processing on behalf of other CPUs.) */
22439 + int is_shared;
22440 + /* The portal's dedicated channel id, use this value for initialising
22441 + * frame queues to target this portal when scheduled. */
22442 + u16 channel;
22443 + /* A mask of which pool channels this portal has dequeue access to
22444 + * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */
22445 + u32 pools;
22446 +};
22447 +
22448 +/* This enum, and the callback type that returns it, are used when handling
22449 + * dequeued frames via DQRR. Note that for "null" callbacks registered with the
22450 + * portal object (for handling dequeues that do not demux because contextB is
22451 + * NULL), the return value *MUST* be qman_cb_dqrr_consume. */
22452 +enum qman_cb_dqrr_result {
22453 + /* DQRR entry can be consumed */
22454 + qman_cb_dqrr_consume,
22455 + /* Like _consume, but requests parking - FQ must be held-active */
22456 + qman_cb_dqrr_park,
22457 + /* Does not consume, for DCA mode only. This allows out-of-order
22458 + * consumes by explicit calls to qman_dca() and/or the use of implicit
22459 + * DCA via EQCR entries. */
22460 + qman_cb_dqrr_defer,
22461 + /* Stop processing without consuming this ring entry. Exits the current
22462 + * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an
22463 + * interrupt handler, the callback would typically call
22464 + * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
22465 + * otherwise the interrupt will reassert immediately. */
22466 + qman_cb_dqrr_stop,
22467 + /* Like qman_cb_dqrr_stop, but consumes the current entry. */
22468 + qman_cb_dqrr_consume_stop
22469 +};
22470 +typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
22471 + struct qman_fq *fq,
22472 + const struct qm_dqrr_entry *dqrr);
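+
+/* Illustrative sketch: a minimal callback of the above type that simply
+ * consumes each dequeued frame. A real handler would act on the frame
+ * descriptor carried in the DQRR entry before returning;
+ *
+ * static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *qm,
+ *                                            struct qman_fq *fq,
+ *                                            const struct qm_dqrr_entry *dqrr)
+ * {
+ *     // ... process the dequeued frame described by 'dqrr' ...
+ *     return qman_cb_dqrr_consume;
+ * }
+ */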
22473 +
22474 +/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
22475 + * are always consumed after the callback returns. */
22476 +typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
22477 + const struct qm_mr_entry *msg);
22478 +
22479 +/* This callback type is used when handling DCP ERNs */
22480 +typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
22481 + const struct qm_mr_entry *msg);
22482 +
22483 +/* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
22484 + * held-active + held-suspended are just "sched". Things like "retired" will not
22485 + * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
22486 + * then, to indicate it's completing and to gate attempts to retry the retire
22487 + * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
22488 + * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
22489 + * index rather than the FQ that ring entry corresponds to), so repeated park
22490 + * commands are allowed (if you're silly enough to try) but won't change FQ
22491 + * state, and the resulting park notifications move FQs from "sched" to
22492 + * "parked". */
22493 +enum qman_fq_state {
22494 + qman_fq_state_oos,
22495 + qman_fq_state_parked,
22496 + qman_fq_state_sched,
22497 + qman_fq_state_retired
22498 +};
22499 +
22500 +/* Frame queue objects (struct qman_fq) are stored within memory passed to
22501 + * qman_create_fq(), as this allows stashing of caller-provided demux callback
22502 + * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
22503 + * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
22504 + * they should;
22505 + *
22506 + * (a) extend the qman_fq structure with their state; eg.
22507 + *
22508 + * // myfq is allocated and driver_fq callbacks filled in;
22509 + * struct my_fq {
22510 + * struct qman_fq base;
22511 + * int an_extra_field;
22512 + * [ ... add other fields to be associated with each FQ ...]
22513 + * } *myfq = some_my_fq_allocator();
22514 + * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
22515 + *
22516 + * // in a dequeue callback, access extra fields from 'fq' via a cast;
22517 + * struct my_fq *myfq = (struct my_fq *)fq;
22518 + * do_something_with(myfq->an_extra_field);
22519 + * [...]
22520 + *
22521 + * (b) when and if configuring the FQ for context stashing, specify however
22522 + * many cachelines are required to stash 'struct my_fq', to accelerate not
22523 + * only the Qman driver but the callback as well.
22524 + */
22525 +
22526 +struct qman_fq_cb {
22527 + qman_cb_dqrr dqrr; /* for dequeued frames */
22528 + qman_cb_mr ern; /* for s/w ERNs */
22529 + qman_cb_mr fqs; /* frame-queue state changes*/
22530 +};
22531 +
22532 +struct qman_fq {
22533 + /* Caller of qman_create_fq() provides these demux callbacks */
22534 + struct qman_fq_cb cb;
22535 + /* These are internal to the driver, don't touch. In particular, they
22536 + * may change, be removed, or extended (so you shouldn't rely on
22537 + * sizeof(qman_fq) being a constant). */
22538 + spinlock_t fqlock;
22539 + u32 fqid;
22540 + volatile unsigned long flags;
22541 + enum qman_fq_state state;
22542 + int cgr_groupid;
22543 + struct rb_node node;
22544 +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
22545 + u32 key;
22546 +#endif
22547 +};
22548 +
22549 +/* This callback type is used when handling congestion group entry/exit.
22550 + * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. */
22551 +typedef void (*qman_cb_cgr)(struct qman_portal *qm,
22552 + struct qman_cgr *cgr, int congested);
22553 +
22554 +struct qman_cgr {
22555 + /* Set these prior to qman_create_cgr() */
22556 + u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
22557 + qman_cb_cgr cb;
22558 + /* These are private to the driver */
22559 + u16 chan; /* portal channel this object is created on */
22560 + struct list_head node;
22561 +};
22562 +
22563 +/* Flags to qman_create_fq() */
22564 +#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
22565 +#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
22566 +#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
22567 +#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
22568 +#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
22569 +#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
22570 +
22571 +/* Flags to qman_destroy_fq() */
22572 +#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
22573 +
22574 +/* Flags from qman_fq_state() */
22575 +#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
22576 +#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
22577 +#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
22578 +#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
22579 +#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
22580 +#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
22581 +
22582 +/* Flags to qman_init_fq() */
22583 +#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
22584 +#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
22585 +
22586 +/* Flags to qman_volatile_dequeue() */
22587 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
22588 +#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
22589 +#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
22590 +#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
22591 +#endif
22592 +
22593 +/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
22594 + * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
22595 + * any change here should be audited in PME.) */
22596 +#ifdef CONFIG_FSL_DPA_CAN_WAIT
22597 +#define QMAN_ENQUEUE_FLAG_WAIT 0x00010000 /* wait if EQCR is full */
22598 +#define QMAN_ENQUEUE_FLAG_WAIT_INT 0x00020000 /* if wait, interruptible? */
22599 +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
22600 +#define QMAN_ENQUEUE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
22601 +#endif
22602 +#endif
22603 +#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
22604 +#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
22605 +#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
22606 +#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
22607 + (((u32)(p) << 2) & 0x00000f00)
22608 +#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
22609 +#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
22610 +#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
22611 +#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
22612 +/* For the ORP-specific qman_enqueue_orp() variant;
22613 + * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
22614 + * of a frame. */
22615 +#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
22616 +/* - this flag performs no enqueue but fills in an ORP sequence number that
22617 + * would otherwise block it (eg. if a frame has been dropped). */
22618 +#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
22619 +/* - this flag performs no enqueue but advances NESN to the given sequence
22620 + * number. */
22621 +#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
22622 +
22623 +/* Flags to qman_modify_cgr() */
22624 +#define QMAN_CGR_FLAG_USE_INIT 0x00000001
22625 +#define QMAN_CGR_MODE_FRAME 0x00000001
22626 +
22627 + /* Portal Management */
22628 + /* ----------------- */
22629 +/**
22630 + * qman_get_portal_config - get portal configuration settings
22631 + *
22632 + * This returns a read-only view of the current cpu's affine portal settings.
22633 + */
22634 +const struct qman_portal_config *qman_get_portal_config(void);
22635 +
22636 +/**
22637 + * qman_irqsource_get - return the portal work that is interrupt-driven
22638 + *
22639 + * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
22640 + * enabled for interrupt handling on the current cpu's affine portal. These
22641 + * sources will trigger the portal interrupt and the interrupt handler (or a
22642 + * tasklet/bottom-half it defers to) will perform the corresponding processing
22643 + * work. The qman_poll_***() functions will only process sources that are not in
22644 + * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
22645 + * this always returns zero.
22646 + */
22647 +u32 qman_irqsource_get(void);
22648 +
22649 +/**
22650 + * qman_irqsource_add - add processing sources to be interrupt-driven
22651 + * @bits: bitmask of QM_PIRQ_**I processing sources
22652 + *
22653 + * Adds processing sources that should be interrupt-driven (rather than
22654 + * processed via qman_poll_***() functions). Returns zero for success, or
22655 + * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
22656 + */
22657 +int qman_irqsource_add(u32 bits);
22658 +
22659 +/**
22660 + * qman_irqsource_remove - remove processing sources from being interrupt-driven
22661 + * @bits: bitmask of QM_PIRQ_**I processing sources
22662 + *
22663 + * Removes processing sources from being interrupt-driven, so that they will
22664 + * instead be processed via qman_poll_***() functions. Returns zero for success,
22665 + * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
22666 + */
22667 +int qman_irqsource_remove(u32 bits);
22668 +
22669 +/**
22670 + * qman_affine_cpus - return a mask of cpus that have affine portals
22671 + */
22672 +const cpumask_t *qman_affine_cpus(void);
22673 +
22674 +/**
22675 + * qman_affine_channel - return the channel ID of a portal
22676 + * @cpu: the cpu whose affine portal is the subject of the query
22677 + *
22678 + * If @cpu is -1, the affine portal for the current CPU will be used. It is a
22679 + * bug to call this function for any value of @cpu (other than -1) that is not a
22680 + * member of the mask returned from qman_affine_cpus().
22681 + */
22682 +u16 qman_affine_channel(int cpu);
22683 +
22684 +/**
22685 + * qman_get_affine_portal - return the portal pointer affine to cpu
22686 + * @cpu: the cpu whose affine portal is the subject of the query
22687 + *
22688 + */
22689 +void *qman_get_affine_portal(int cpu);
22690 +
22691 +/**
22692 + * qman_poll_dqrr - process DQRR (fast-path) entries
22693 + * @limit: the maximum number of DQRR entries to process
22694 + *
22695 + * Use of this function requires that DQRR processing not be interrupt-driven.
22696 + * Ie. the value returned by qman_irqsource_get() should not include
22697 + * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
22698 + * this function will return -EINVAL, otherwise the return value is >=0 and
22699 + * represents the number of DQRR entries processed.
22700 + */
22701 +int qman_poll_dqrr(unsigned int limit);
22702 +
22703 +/**
22704 + * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
22705 + *
22706 + * This function does any portal processing that isn't interrupt-driven. If the
22707 + * current CPU is sharing a portal hosted on another CPU, this function will
22708 + * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources
22709 + * indicating what interrupt sources were actually processed by the call.
22710 + */
22711 +u32 qman_poll_slow(void);
22712 +
22713 +/**
22714 + * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow()
22715 + *
22716 + * Dispatcher logic on a cpu can use this to trigger any maintenance of the
22717 + * affine portal. There are two classes of portal processing in question;
22718 + * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
22719 + * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
22720 + * thresholds, congestion state changes, etc). This function does whatever
22721 + * processing is not triggered by interrupts.
22722 + *
22723 + * Note, if DQRR and some slow-path processing are poll-driven (rather than
22724 + * interrupt-driven) then this function uses a heuristic to determine how often
22725 + * to run slow-path processing - as slow-path processing introduces at least a
22726 + * minimum latency each time it is run, whereas fast-path (DQRR) processing is
22727 + * close to zero-cost if there is no work to be done. Applications can tune this
22728 + * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly
22729 + * rather than going via this wrapper.
22730 + */
22731 +void qman_poll(void);
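+
+/* Illustrative sketch: a run-to-completion dispatch loop for a portal whose
+ * processing is entirely poll-driven (ie. qman_irqsource_get() returns zero).
+ * The DQRR budget of 16 and the 'stop' flag are arbitrary placeholders;
+ *
+ * while (!stop) {
+ *     qman_poll_slow();
+ *     qman_poll_dqrr(16);
+ *     // ... other per-loop application work ...
+ * }
+ */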
22732 +
22733 +/**
22734 + * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
22735 + *
22736 + * Disables DQRR processing of the portal. This is reference-counted, so
22737 + * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
22738 + * truly re-enable dequeuing.
22739 + */
22740 +void qman_stop_dequeues(void);
22741 +
22742 +/**
22743 + * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
22744 + *
22745 + * Enables DQRR processing of the portal. This is reference-counted, so
22746 + * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
22747 + * truly re-enable dequeuing.
22748 + */
22749 +void qman_start_dequeues(void);
22750 +
22751 +/**
22752 + * qman_static_dequeue_add - Add pool channels to the portal SDQCR
22753 + * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
22754 + *
22755 + * Adds a set of pool channels to the portal's static dequeue command register
22756 + * (SDQCR). The requested pools are limited to those the portal has dequeue
22757 + * access to.
22758 + */
22759 +void qman_static_dequeue_add(u32 pools);
22760 +
22761 +/**
22762 + * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
22763 + * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
22764 + *
22765 + * Removes a set of pool channels from the portal's static dequeue command
22766 + * register (SDQCR). The requested pools are limited to those the portal has
22767 + * dequeue access to.
22768 + */
22769 +void qman_static_dequeue_del(u32 pools);
22770 +
22771 +/**
22772 + * qman_static_dequeue_get - return the portal's current SDQCR
22773 + *
22774 + * Returns the portal's current static dequeue command register (SDQCR). The
22775 + * entire register is returned, so if only the currently-enabled pool channels
22776 + * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
22777 + */
22778 +u32 qman_static_dequeue_get(void);
22779 +
22780 +/**
22781 + * qman_dca - Perform a Discrete Consumption Acknowledgement
22782 + * @dq: the DQRR entry to be consumed
22783 + * @park_request: indicates whether the held-active @fq should be parked
22784 + *
22785 + * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
22786 + * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
22787 + * does not take a 'portal' argument but implies the core affine portal from the
22788 + * cpu that is currently executing the function. For reasons of locking, this
22789 + * function must be called from the same CPU as that which processed the DQRR
22790 + * entry in the first place.
22791 + */
22792 +void qman_dca(struct qm_dqrr_entry *dq, int park_request);
22793 +
22794 +/**
22795 + * qman_eqcr_is_empty - Determine if portal's EQCR is empty
22796 + *
22797 + * For use in situations where a cpu-affine caller needs to determine when all
22798 + * enqueues for the local portal have been processed by Qman but can't use the
22799 + * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
22800 + * The function forces tracking of EQCR consumption (which normally doesn't
22801 + * happen until enqueue processing needs to find space to put new enqueue
22802 + * commands), and returns zero if the ring still has unprocessed entries,
22803 + * non-zero if it is empty.
22804 + */
22805 +int qman_eqcr_is_empty(void);
22806 +
22807 +/**
22808 + * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
22809 + * @handler: callback for processing DCP ERNs
22810 + * @affine: whether this handler is specific to the locally affine portal
22811 + *
22812 + * If a hardware block's interface to Qman (ie. its direct-connect portal, or
22813 + * DCP) is configured not to receive enqueue rejections, then any enqueues
22814 + * through that DCP that are rejected will be sent to a given software portal.
22815 + * If @affine is non-zero, then this handler will only be used for DCP ERNs
22816 + * received on the portal affine to the current CPU. If multiple CPUs share a
22817 + * portal and they all call this function, they will be setting the handler for
22818 + * the same portal! If @affine is zero, then this handler will be global to all
22819 + * portals handled by this instance of the driver. Only those portals that do
22820 + * not have their own affine handler will use the global handler.
22821 + */
22822 +void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
22823 +
22824 + /* FQ management */
22825 + /* ------------- */
22826 +/**
22827 + * qman_create_fq - Allocates a FQ
22828 + * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
22829 + * @flags: bit-mask of QMAN_FQ_FLAG_*** options
22830 + * @fq: memory for storing the 'fq', with callbacks filled in
22831 + *
22832 + * Creates a frame queue object for the given @fqid, unless the
22833 + * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
22834 + * dynamically allocated (or the function fails if none are available). Once
22835 + * created, the caller should not touch the memory at 'fq' except as extended to
22836 + * adjacent memory for user-defined fields (see the definition of "struct
22837 + * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
22838 + * pre-existing frame-queues that aren't to be otherwise interfered with; it
22839 + * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
22840 + * causes the driver to honour any contextB modifications requested in the
22841 + * qm_init_fq() API, as this indicates the frame queue will be consumed by a
22842 + * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
22843 + * software portals, the contextB field is controlled by the driver and can't be
22844 + * modified by the caller. If the AS_IS flag is specified, management commands
22845 + * will be used to query state for frame queue @fqid and construct
22846 + * a frame queue object based on that, rather than assuming/requiring that it be
22847 + * Out of Service.
22848 + */
22849 +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
22850 +
22851 +/**
22852 + * qman_destroy_fq - Deallocates a FQ
22853 + * @fq: the frame queue object to release
22854 + * @flags: bit-mask of QMAN_FQ_DESTROY_*** options
22855 + *
22856 + * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
22857 + * not deallocated but the caller regains ownership, to do with as desired. The
22858 + * FQ must be in the 'out-of-service' state unless the QMAN_FQ_DESTROY_PARKED
22859 + * flag is specified, in which case it may also be in the 'parked' state.
22860 + */
22861 +void qman_destroy_fq(struct qman_fq *fq, u32 flags);
22862 +
22863 +/**
22864 + * qman_fq_fqid - Queries the frame queue ID of a FQ object
22865 + * @fq: the frame queue object to query
22866 + */
22867 +u32 qman_fq_fqid(struct qman_fq *fq);
22868 +
22869 +/**
22870 + * qman_fq_state - Queries the state of a FQ object
22871 + * @fq: the frame queue object to query
22872 + * @state: pointer to state enum to return the FQ scheduling state
22873 + * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
22874 + *
22875 + * Queries the state of the FQ object, without performing any h/w commands.
22876 + * This captures the state, as seen by the driver, at the time the function
22877 + * executes.
22878 + */
22879 +void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
22880 +
22881 +/**
22882 + * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
22883 + * @fq: the frame queue object to modify, must be 'parked' or new.
22884 + * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
22885 + * @opts: the FQ-modification settings, as defined in the low-level API
22886 + *
22887 + * The @opts parameter comes from the low-level portal API. Select
22888 + * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
22889 + * rather than parked. NB, @opts can be NULL.
22890 + *
22891 + * Note that some fields and options within @opts may be ignored or overwritten
22892 + * by the driver;
22893 + * 1. the 'count' and 'fqid' fields are always ignored (this operation only
22894 + * affects one frame queue: @fq).
22895 + * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
22896 + * 'fqd' structure's 'context_b' field are sometimes overwritten;
22897 + * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
22898 + * initialised to a value used by the driver for demux.
22899 + * - if context_b is initialised for demux, so is context_a in case stashing
22900 + * is requested (see item 4).
22901 + * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
22902 + * objects.)
22903 + * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
22904 + * 'dest::channel' field will be overwritten to match the portal used to issue
22905 + * the command. If the WE_DESTWQ write-enable bit had already been set by the
22906 + * caller, the channel workqueue will be left as-is, otherwise the write-enable
22907 + * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
22908 + * isn't set, the destination channel/workqueue fields and the write-enable bit
22909 + * are left as-is.
22910 + * 4. if the driver overwrites context_a/b for demux, then if
22911 + * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
22912 + * context_a.address fields and will leave the stashing fields provided by the
22913 + * user alone, otherwise it will zero out the context_a.stashing fields.
22914 + */
22915 +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
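+
+/* Illustrative sketch: bring up a dynamically-allocated FQ whose dequeues are
+ * delivered to this cpu's affine portal. 'my_dqrr_cb' is a hypothetical
+ * handler of the qman_cb_dqrr type and error handling is omitted;
+ *
+ * struct my_fq *myfq = some_my_fq_allocator(); // see 'struct my_fq' above
+ * myfq->base.cb.dqrr = my_dqrr_cb;
+ * if (!qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &myfq->base))
+ *     qman_init_fq(&myfq->base,
+ *                  QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL);
+ */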
22916 +
22917 +/**
22918 + * qman_schedule_fq - Schedules a FQ
22919 + * @fq: the frame queue object to schedule, must be 'parked'
22920 + *
22921 + * Schedules the frame queue, which must be Parked, which takes it to
22922 + * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
22923 + */
22924 +int qman_schedule_fq(struct qman_fq *fq);
22925 +
22926 +/**
22927 + * qman_retire_fq - Retires a FQ
22928 + * @fq: the frame queue object to retire
22929 + * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
22930 + *
22931 + * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
22932 + * the retirement was started asynchronously, otherwise it returns negative for
22933 + * failure. When this function returns zero, @flags is set to indicate whether
22934 + * the retired FQ is empty and/or whether it has any ORL fragments (to show up
22935 + * as ERNs). Otherwise the corresponding flags will be known when a subsequent
22936 + * FQRN message shows up on the portal's message ring.
22937 + *
22938 + * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
22939 + * Active state), the completion will be via the message ring as a FQRN - but
22940 + * the corresponding callback may occur before this function returns!! Ie. the
22941 + * caller should be prepared to accept the callback as the function is called,
22942 + * not only once it has returned.
22943 + */
22944 +int qman_retire_fq(struct qman_fq *fq, u32 *flags);
22945 +
22946 +/**
22947 + * qman_oos_fq - Puts a FQ "out of service"
22948 + * @fq: the frame queue object to be put out-of-service, must be 'retired'
22949 + *
22950 + * The frame queue must be retired and empty, and any order restoration list
22951 + * fragments released as ERNs at the time of retirement must all be consumed.
22952 + */
22953 +int qman_oos_fq(struct qman_fq *fq);
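+
+/* Illustrative teardown sketch: retire the FQ, wait for the driver to see the
+ * retirement complete (the FQRN is delivered via the MR, so interrupt-driven
+ * or qman_poll()-driven MR processing must be running), then take the FQ out
+ * of service and release the object. Draining of in-flight traffic and error
+ * handling are omitted;
+ *
+ * enum qman_fq_state state;
+ * u32 flags;
+ * if (qman_retire_fq(fq, &flags) >= 0) {
+ *     do {
+ *         qman_fq_state(fq, &state, &flags);
+ *     } while (flags & QMAN_FQ_STATE_CHANGING);
+ *     qman_oos_fq(fq);
+ *     qman_destroy_fq(fq, 0);
+ * }
+ */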
22954 +
22955 +/**
22956 + * qman_fq_flow_control - Set the XON/XOFF state of a FQ
22957 + * @fq: the frame queue object to be set to XON/XOFF state, must not be in the
22958 + * 'oos', 'retired' or 'parked' state
22959 + * @xon: boolean to set fq in XON or XOFF state
22960 + *
22961 + * The frame queue should be in Tentatively Scheduled or Truly Scheduled state,
22962 + * otherwise the IFSI interrupt will be asserted.
22963 + */
22964 +int qman_fq_flow_control(struct qman_fq *fq, int xon);
22965 +
22966 +/**
22967 + * qman_query_fq - Queries FQD fields (via h/w query command)
22968 + * @fq: the frame queue object to be queried
22969 + * @fqd: storage for the queried FQD fields
22970 + */
22971 +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
22972 +
22973 +/**
22974 + * qman_query_fq_np - Queries non-programmable FQD fields
22975 + * @fq: the frame queue object to be queried
22976 + * @np: storage for the queried FQD fields
22977 + */
22978 +int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
22979 +
22980 +/**
22981 + * qman_query_wq - Queries work queue lengths
22982 + * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
22983 + * to this software portal. Otherwise, query length of WQs in a
22984 + * channel specified in wq.
22985 + * @wq: storage for the queried WQ lengths. Also specifies the channel to
22986 + * query if query_dedicated is zero.
22987 + */
22988 +int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
22989 +
22990 +/**
22991 + * qman_volatile_dequeue - Issue a volatile dequeue command
22992 + * @fq: the frame queue object to dequeue from
22993 + * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
22994 + * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
22995 + *
22996 + * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
22997 + * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
22998 + * the VDQCR is already in use, otherwise returns non-zero for failure. If
22999 + * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
23000 + * the VDQCR command has finished executing (ie. once the callback for the last
23001 + * DQRR entry resulting from the VDQCR command has been called). If not using
23002 + * the FINISH flag, completion can be determined either by detecting the
23003 + * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
23004 + * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
23005 + * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
23006 + * "flags" retrieved from qman_fq_state().
23007 + */
23008 +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
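+
+/* Illustrative sketch: volatile-dequeue up to three frames from a parked or
+ * retired FQ and only continue once the command has completed. The
+ * QM_VDQCR_NUMFRAMES_SET() encoding is assumed from the QM_VDQCR_*** family
+ * referred to above, and the WAIT/FINISH flags require
+ * CONFIG_FSL_DPA_CAN_WAIT;
+ *
+ * ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
+ *                             QMAN_VOLATILE_FLAG_FINISH,
+ *                             QM_VDQCR_NUMFRAMES_SET(3));
+ * // on ret == 0, every DQRR entry produced by the command has already been
+ * // handed to fq->cb.dqrr
+ */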
23009 +
23010 +/**
23011 + * qman_enqueue - Enqueue a frame to a frame queue
23012 + * @fq: the frame queue object to enqueue to
23013 + * @fd: a descriptor of the frame to be enqueued
23014 + * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
23015 + *
23016 + * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
23017 + * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
23018 + * field is ignored. The return value is non-zero on error, such as ring full
23019 + * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
23020 + * specified), etc. If the ring is full and FLAG_WAIT is specified, this
23021 + * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
23022 + * interrupt will assert when Qman consumes the EQCR entry (subject to "status
23023 + * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
23024 + * perform an implied "discrete consumption acknowledgement" on the dequeue
23025 + * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
23026 + * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
23027 + * this implicit DCA can delay the release of a "held active" frame queue
23028 + * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
23029 + * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
23030 + * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
23031 + * acknowledgement should "park request" the "held active" frame queue. Ie.
23032 + * when the portal eventually releases that frame queue, it will be left in the
23033 + * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
23034 + * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
23035 + * is requested, and the FQ is a member of a congestion group, then this
23036 + * function returns -EAGAIN if the congestion group is currently congested.
23037 + * Note, this does not eliminate ERNs, as the async interface means we can be
23038 + * sending enqueue commands to an un-congested FQ that becomes congested before
23039 + * the enqueue commands are processed, but it does minimise needless thrashing
23040 + * of an already busy hardware resource by throttling many of the to-be-dropped
23041 + * enqueues "at the source".
23042 + */
23043 +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
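+
+/* Illustrative sketch: enqueue a prepared frame descriptor, spinning on a full
+ * EQCR rather than using the (CONFIG_FSL_DPA_CAN_WAIT) blocking flags;
+ *
+ * while (qman_enqueue(fq, &fd, 0))
+ *     cpu_relax();
+ */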
23044 +
23045 +typedef int (*qman_cb_precommit) (void *arg);
23046 +/**
23047 + * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
23048 + * @fq: the frame queue object to enqueue to
23049 + * @fd: a descriptor of the frame to be enqueued
23050 + * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
23051 + * @cb: user supplied callback function to invoke before writing commit verb.
23052 + * @cb_arg: callback function argument
23053 + *
23054 + * This is similar to qman_enqueue except that it will invoke a user supplied
23055 + * callback function just before writing the commit verb. This is useful
23056 + * when the user wants to do something *just before* enqueuing the request,
23057 + * at a point where the enqueue can no longer fail.
23058 + */
23059 +int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
23060 + u32 flags, qman_cb_precommit cb, void *cb_arg);
23061 +
23062 +/**
23063 + * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
23064 + * @fq: the frame queue object to enqueue to
23065 + * @fd: a descriptor of the frame to be enqueued
23066 + * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
23067 + * @orp: the frame queue object used as an order restoration point.
23068 + * @orp_seqnum: the sequence number of this frame in the order restoration path
23069 + *
23070 + * Similar to qman_enqueue(), but with the addition of an Order Restoration
23071 + * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
23072 + * enqueue operation to employ order restoration. Each frame queue object acts
23073 + * as an Order Definition Point (ODP) by providing each frame dequeued from it
23074 + * with an incrementing sequence number; this value is generally ignored unless
23075 + * that sequence of dequeued frames will need order restoration later. Each
23076 + * frame queue object also encapsulates an Order Restoration Point (ORP), which
23077 + * is a re-assembly context for re-ordering frames relative to their sequence
23078 + * numbers as they are enqueued. The ORP does not have to be within the frame
23079 + * queue that receives the enqueued frame, in fact it is usually the frame
23080 + * queue from which the frames were originally dequeued. For the purposes of
23081 + * order restoration, multiple frames (or "fragments") can be enqueued for a
23082 + * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
23083 + * enqueues except the final fragment of a given sequence number. Ordering
23084 + * between sequence numbers is guaranteed, even if fragments of different
23085 + * sequence numbers are interlaced with one another. Fragments of the same
23086 + * sequence number will retain the order in which they are enqueued. If no
23087 + * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
23088 + * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
23089 + * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
23090 + * sequence number should become the ORP's "Next Expected Sequence Number".
23091 + *
23092 + * Side note: a frame queue object can be used purely as an ORP, without
23093 + * carrying any frames at all. Care should be taken not to deallocate a frame
23094 + * queue object that is being actively used as an ORP, as a future allocation
23095 + * of the frame queue object may start using the internal ORP before the
23096 + * previous use has finished.
23097 + */
23098 +int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
23099 + struct qman_fq *orp, u16 orp_seqnum);
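+
+/* Illustrative sketch: enqueue a two-fragment sequence through an ORP. All
+ * fragments except the last carry QMAN_ENQUEUE_FLAG_NLIS, as described above.
+ * 'tx_fq', 'orp_fq', the frame descriptors and 'seqnum' are placeholders and
+ * return values are ignored for brevity;
+ *
+ * qman_enqueue_orp(tx_fq, &frag0, QMAN_ENQUEUE_FLAG_NLIS, orp_fq, seqnum);
+ * qman_enqueue_orp(tx_fq, &frag1, 0, orp_fq, seqnum);
+ */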
23100 +
23101 +/**
23102 + * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
23103 + * @result: is set by the API to the base FQID of the allocated range
23104 + * @count: the number of FQIDs required
23105 + * @align: required alignment of the allocated range
23106 + * @partial: non-zero if the API can return fewer than @count FQIDs
23107 + *
23108 + * Returns the number of frame queues allocated, or a negative error code. If
23109 + * @partial is non zero, the allocation request may return a smaller range of
23110 + * FQs than requested (though alignment will be as requested). If @partial is
23111 + * zero, the return value will either be 'count' or negative.
23112 + */
23113 +int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
23114 +static inline int qman_alloc_fqid(u32 *result)
23115 +{
23116 + int ret = qman_alloc_fqid_range(result, 1, 0, 0);
23117 + return (ret > 0) ? 0 : ret;
23118 +}
23119 +
23120 +/**
23121 + * qman_release_fqid_range - Release the specified range of frame queue IDs
23122 + * @fqid: the base FQID of the range to deallocate
23123 + * @count: the number of FQIDs in the range
23124 + *
23125 + * This function can also be used to seed the allocator with ranges of FQIDs
23126 + * that it can subsequently allocate from.
23127 + */
23128 +void qman_release_fqid_range(u32 fqid, unsigned int count);
23129 +static inline void qman_release_fqid(u32 fqid)
23130 +{
23131 + qman_release_fqid_range(fqid, 1);
23132 +}
23133 +
23134 +void qman_seed_fqid_range(u32 fqid, unsigned int count);
23135 +
23136 +
23137 +int qman_shutdown_fq(u32 fqid);
23138 +
23139 +/**
23140 + * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
23141 + * @fqid: the base FQID of the range to reserve
23142 + * @count: the number of FQIDs in the range
23143 + */
23144 +int qman_reserve_fqid_range(u32 fqid, unsigned int count);
23145 +static inline int qman_reserve_fqid(u32 fqid)
23146 +{
23147 + return qman_reserve_fqid_range(fqid, 1);
23148 +}
23149 +
23150 + /* Pool-channel management */
23151 + /* ----------------------- */
23152 +/**
23153 + * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
23154 + * @result: is set by the API to the base pool-channel ID of the allocated range
23155 + * @count: the number of pool-channel IDs required
23156 + * @align: required alignment of the allocated range
23157 + * @partial: non-zero if the API can return fewer than @count
23158 + *
23159 + * Returns the number of pool-channel IDs allocated, or a negative error code.
23160 + * If @partial is non zero, the allocation request may return a smaller range
23161 + * than requested (though alignment will be as requested). If @partial is zero,
23162 + * the return value will either be 'count' or negative.
23163 + */
23164 +int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
23165 +static inline int qman_alloc_pool(u32 *result)
23166 +{
23167 + int ret = qman_alloc_pool_range(result, 1, 0, 0);
23168 + return (ret > 0) ? 0 : ret;
23169 +}
23170 +
23171 +/**
23172 + * qman_release_pool_range - Release the specified range of pool-channel IDs
23173 + * @id: the base pool-channel ID of the range to deallocate
23174 + * @count: the number of pool-channel IDs in the range
23175 + */
23176 +void qman_release_pool_range(u32 id, unsigned int count);
23177 +static inline void qman_release_pool(u32 id)
23178 +{
23179 + qman_release_pool_range(id, 1);
23180 +}
23181 +
23182 +/**
23183 + * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
23184 + * @id: the base pool-channel ID of the range to reserve
23185 + * @count: the number of pool-channel IDs in the range
23186 + */
23187 +int qman_reserve_pool_range(u32 id, unsigned int count);
23188 +static inline int qman_reserve_pool(u32 id)
23189 +{
23190 + return qman_reserve_pool_range(id, 1);
23191 +}
23192 +
23193 +void qman_seed_pool_range(u32 id, unsigned int count);
23194 +
23195 + /* CGR management */
23196 + /* -------------- */
23197 +/**
23198 + * qman_create_cgr - Register a congestion group object
23199 + * @cgr: the 'cgr' object, with fields filled in
23200 + * @flags: QMAN_CGR_FLAG_* values
23201 + * @opts: optional state of CGR settings
23202 + *
23203 + * Registers this object to receive congestion entry/exit callbacks on the
23204 + * portal affine to the cpu on which this API is executed. If opts is
23205 + * NULL then only the callback (cgr->cb) function is registered. If @flags
23206 + * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
23207 + * any unspecified parameters) will be used rather than a modify hw command
23208 + * (which only modifies the specified parameters).
23209 + */
23210 +int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
23211 + struct qm_mcc_initcgr *opts);
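+
+/* Illustrative sketch: register for congestion-state notifications on a CGR
+ * whose thresholds are configured elsewhere (passing a NULL @opts registers
+ * the callback only, as noted above). 'my_cgr_cb' is a hypothetical handler
+ * of the qman_cb_cgr type and the cgrid value is arbitrary;
+ *
+ * static struct qman_cgr my_cgr = {
+ *     .cgrid = 12,
+ *     .cb = my_cgr_cb,
+ * };
+ * int err = qman_create_cgr(&my_cgr, 0, NULL);
+ */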
23212 +
23213 +/**
23214 + * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
23215 + * @cgr: the 'cgr' object, with fields filled in
23216 + * @flags: QMAN_CGR_FLAG_* values
23217 + * @dcp_portal: the DCP portal to which the cgr object is registered.
23218 + * @opts: optional state of CGR settings
23219 + *
23220 + */
23221 +int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
23222 + struct qm_mcc_initcgr *opts);
23223 +
23224 +/**
23225 + * qman_delete_cgr - Deregisters a congestion group object
23226 + * @cgr: the 'cgr' object to deregister
23227 + *
23228 + * "Unplugs" this CGR object from the portal affine to the cpu on which this API
23229 + * is executed. This must be executed on the same affine portal on which it was
23230 + * created.
23231 + */
23232 +int qman_delete_cgr(struct qman_cgr *cgr);
23233 +
23234 +/**
23235 + * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
23236 + * @cgr: the 'cgr' object to deregister
23237 + *
23238 + * This will select the proper CPU and run qman_delete_cgr() there.
23239 + */
23240 +void qman_delete_cgr_safe(struct qman_cgr *cgr);
23241 +
23242 +/**
23243 + * qman_modify_cgr - Modify CGR fields
23244 + * @cgr: the 'cgr' object to modify
23245 + * @flags: QMAN_CGR_FLAG_* values
23246 + * @opts: the CGR-modification settings
23247 + *
23248 + * The @opts parameter comes from the low-level portal API, and can be NULL.
23249 + * Note that some fields and options within @opts may be ignored or overwritten
23250 + * by the driver, in particular the 'cgrid' field is ignored (this operation
23251 + * only affects the given CGR object). If @flags contains
23252 + * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
23253 + * unspecified parameters) will be used rather than a modify hw command (which
23254 + * only modifies the specified parameters).
23255 + */
23256 +int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
23257 + struct qm_mcc_initcgr *opts);
23258 +
23259 +/**
23260 +* qman_query_cgr - Queries CGR fields
23261 +* @cgr: the 'cgr' object to query
23262 +* @result: storage for the queried congestion group record
23263 +*/
23264 +int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
23265 +
23266 +/**
23267 + * qman_query_congestion - Queries the state of all congestion groups
23268 + * @congestion: storage for the queried state of all congestion groups
23269 + */
23270 +int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
23271 +
23272 +/**
23273 + * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
23274 + * @result: is set by the API to the base CGR ID of the allocated range
23275 + * @count: the number of CGR IDs required
23276 + * @align: required alignment of the allocated range
23277 + * @partial: non-zero if the API can return fewer than @count
23278 + *
23279 + * Returns the number of CGR IDs allocated, or a negative error code.
23280 + * If @partial is non zero, the allocation request may return a smaller range
23281 + * than requested (though alignment will be as requested). If @partial is zero,
23282 + * the return value will either be 'count' or negative.
23283 + */
23284 +int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
23285 +static inline int qman_alloc_cgrid(u32 *result)
23286 +{
23287 + int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
23288 + return (ret > 0) ? 0 : ret;
23289 +}
23290 +
23291 +/**
23292 + * qman_release_cgrid_range - Release the specified range of CGR IDs
23293 + * @id: the base CGR ID of the range to deallocate
23294 + * @count: the number of CGR IDs in the range
23295 + */
23296 +void qman_release_cgrid_range(u32 id, unsigned int count);
23297 +static inline void qman_release_cgrid(u32 id)
23298 +{
23299 + qman_release_cgrid_range(id, 1);
23300 +}
23301 +
23302 +/**
23303 + * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
23304 + * @id: the base CGR ID of the range to reserve
23305 + * @count: the number of CGR IDs in the range
23306 + */
23307 +int qman_reserve_cgrid_range(u32 id, unsigned int count);
23308 +static inline int qman_reserve_cgrid(u32 id)
23309 +{
23310 + return qman_reserve_cgrid_range(id, 1);
23311 +}
23312 +
23313 +void qman_seed_cgrid_range(u32 id, unsigned int count);
23314 +
23315 +
23316 + /* Helpers */
23317 + /* ------- */
23318 +/**
23319 + * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
23320 + * @fq: the FQ object wrapping the FQID that will be initialised by other s/w
23321 + *
23322 + * In many situations, a FQID is provided for communication between s/w
23323 + * entities, and whilst the consumer is responsible for initialising and
23324 + * scheduling the FQ, the producer(s) generally create a wrapper FQ object and
23325 + * only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie;
23326 + * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
23327 + * However, data can not be enqueued to the FQ until it is initialised out of
23328 + * the OOS state - this function polls for that condition. It is particularly
23329 + * useful for users of IPC functions - each endpoint's Rx FQ is the other
23330 + * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
23331 + * and then use this API on the (NO_MODIFY) Tx FQ object in order to
23332 + * synchronise. The function returns zero for success, +1 if the FQ is still in
23333 + * the OOS state, or negative if there was an error.
23334 + */
23335 +static inline int qman_poll_fq_for_init(struct qman_fq *fq)
23336 +{
23337 + struct qm_mcr_queryfq_np np;
23338 + int err;
23339 + err = qman_query_fq_np(fq, &np);
23340 + if (err)
23341 + return err;
23342 + if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
23343 + return 1;
23344 + return 0;
23345 +}
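+
+/* Illustrative producer-side sketch for the IPC scenario above: wrap the
+ * peer-owned FQID without modifying it, wait for the peer to initialise it,
+ * then start enqueuing. 'peer_fqid', 'tx_fq' and 'fd' are placeholders and
+ * error handling/backoff is omitted;
+ *
+ * qman_create_fq(peer_fqid, QMAN_FQ_FLAG_NO_MODIFY, &tx_fq);
+ * while (qman_poll_fq_for_init(&tx_fq) == 1)
+ *     cpu_relax();
+ * qman_enqueue(&tx_fq, &fd, 0);
+ */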
23346 +
23347 + /* -------------- */
23348 + /* CEETM :: types */
23349 + /* -------------- */
23350 +/**
23351 + * Token Rate Structure
23352 + * Shaping rates are based on a "credit" system and a pre-configured h/w
23353 + * internal timer. The following type represents a shaper "rate" parameter as a
23354 + * fractional number of "tokens". Here's how it works. This (fractional) number
23355 + * of tokens is added to the shaper's "credit" every time the h/w timer elapses
23356 + * (up to a limit which is set by another shaper parameter). Every time a frame
23357 + * is enqueued through a shaper, the shaper deducts as many tokens as there are
23358 + * bytes of data in the enqueued frame. A shaper will not allow itself to
23359 + * enqueue any frames if its token count is negative. As such;
23360 + *
23361 + * The rate at which data is enqueued is limited by the
23362 + * rate at which tokens are added.
23363 + *
23364 + * Therefore if the user knows the period between these h/w timer updates in
23365 + * seconds, they can calculate the maximum traffic rate of the shaper (in
23366 + * bytes-per-second) from the token rate. And vice versa, they can calculate
23367 + * the token rate to use in order to achieve a given traffic rate.
23368 + */
23369 +struct qm_ceetm_rate {
23370 + /* The token rate is; whole + (fraction/8192) */
23371 + u32 whole:11; /* 0..2047 */
23372 + u32 fraction:13; /* 0..8191 */
23373 +};
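+
+/* Worked example (purely illustrative figures): with the fraction expressed in
+ * units of 1/8192, a token rate of { .whole = 10, .fraction = 4096 } adds 10.5
+ * tokens (ie. bytes) of credit per timer update. If, hypothetically, the timer
+ * elapsed 1,000,000 times per second, the resulting ceiling would be
+ * 10.5 * 1,000,000 = 10,500,000 bytes/s, ie. 84 Mbit/s. The real update period
+ * depends on the QMan clock and pre-scaler, so use the bps/token-rate
+ * conversion helpers below rather than hard-coding it. */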
23374 +
23375 +struct qm_ceetm_weight_code {
23376 + /* The weight code is; 5 msbits + 3 lsbits */
23377 + u8 y:5;
23378 + u8 x:3;
23379 +};
23380 +
23381 +struct qm_ceetm {
23382 + unsigned int idx;
23383 + struct list_head sub_portals;
23384 + struct list_head lnis;
23385 + unsigned int sp_range[2];
23386 + unsigned int lni_range[2];
23387 +};
23388 +
23389 +struct qm_ceetm_sp {
23390 + struct list_head node;
23391 + unsigned int idx;
23392 + unsigned int dcp_idx;
23393 + int is_claimed;
23394 + struct qm_ceetm_lni *lni;
23395 +};
23396 +
23397 +/* Logical Network Interface */
23398 +struct qm_ceetm_lni {
23399 + struct list_head node;
23400 + unsigned int idx;
23401 + unsigned int dcp_idx;
23402 + int is_claimed;
23403 + struct qm_ceetm_sp *sp;
23404 + struct list_head channels;
23405 + int shaper_enable;
23406 + int shaper_couple;
23407 + int oal;
23408 + struct qm_ceetm_rate cr_token_rate;
23409 + struct qm_ceetm_rate er_token_rate;
23410 + u16 cr_token_bucket_limit;
23411 + u16 er_token_bucket_limit;
23412 +};
23413 +
23414 +/* Class Queue Channel */
23415 +struct qm_ceetm_channel {
23416 + struct list_head node;
23417 + unsigned int idx;
23418 + unsigned int lni_idx;
23419 + unsigned int dcp_idx;
23420 + struct list_head class_queues;
23421 + struct list_head ccgs;
23422 + u8 shaper_enable;
23423 + u8 shaper_couple;
23424 + struct qm_ceetm_rate cr_token_rate;
23425 + struct qm_ceetm_rate er_token_rate;
23426 + u16 cr_token_bucket_limit;
23427 + u16 er_token_bucket_limit;
23428 +};
23429 +
23430 +struct qm_ceetm_ccg;
23431 +
23432 +/* This callback type is used when handling congestion entry/exit. The
23433 + * 'cb_ctx' value is the opaque value associated with ccg object.
23434 + * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
23435 + */
23436 +typedef void (*qman_cb_ccgr)(struct qm_ceetm_ccg *ccg, void *cb_ctx,
23437 + int congested);
23438 +
23439 +/* Class Congestion Group */
23440 +struct qm_ceetm_ccg {
23441 + struct qm_ceetm_channel *parent;
23442 + struct list_head node;
23443 + struct list_head cb_node;
23444 + qman_cb_ccgr cb;
23445 + void *cb_ctx;
23446 + unsigned int idx;
23447 +};
23448 +
23449 +/* Class Queue */
23450 +struct qm_ceetm_cq {
23451 + struct qm_ceetm_channel *parent;
23452 + struct qm_ceetm_ccg *ccg;
23453 + struct list_head node;
23454 + unsigned int idx;
23455 + int is_claimed;
23456 + struct list_head bound_lfqids;
23457 + struct list_head binding_node;
23458 +};
23459 +
23460 +/* Logical Frame Queue */
23461 +struct qm_ceetm_lfq {
23462 + struct qm_ceetm_channel *parent;
23463 + struct list_head node;
23464 + unsigned int idx;
23465 + unsigned int dctidx;
23466 + u64 context_a;
23467 + u32 context_b;
23468 + qman_cb_mr ern;
23469 +};
23470 +
23471 +/**
23472 + * qman_ceetm_bps2tokenrate - Given a desired rate 'bps' measured in bps
23473 + * (ie. bits-per-second), compute the 'token_rate' fraction that best
23474 + * approximates that rate.
23475 + * @bps: the desired shaper rate in bps.
23476 + * @token_rate: the output token rate computed from the given bps.
23477 + * @rounding: dictates how to round if an exact conversion is not possible; if
23478 + * it is negative then 'token_rate' will round down to the highest value that
23479 + * does not exceed the desired rate, if it is positive then 'token_rate' will
23480 + * round up to the lowest value that is greater than or equal to the desired
23481 + * rate, and if it is zero then it will round to the nearest approximation,
23482 + * whether that be up or down.
23483 + *
23484 + * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
23485 + */
23486 +int qman_ceetm_bps2tokenrate(u64 bps,
23487 + struct qm_ceetm_rate *token_rate,
23488 + int rounding);
23489 +
23490 +/**
23491 + * qman_ceetm_tokenrate2bps - Given a 'token_rate', compute the
23492 + * corresponding number of 'bps'.
23493 + * @token_rate: the input desired token_rate fraction.
23494 + * @bps: the output shaper rate in bps computed from the given token rate.
23495 + * @rounding: has the same semantics as the previous function.
23496 + *
23497 + * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
23498 + */
23499 +int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate,
23500 + u64 *bps,
23501 + int rounding);
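+
+/* Illustrative sketch: derive the token rate for a 1 Gbit/s shaper ceiling,
+ * rounding down so that the configured rate never exceeds the target;
+ *
+ * struct qm_ceetm_rate rate;
+ * int err = qman_ceetm_bps2tokenrate(1000000000ULL, &rate, -1);
+ */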
23502 +
23503 +int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
23504 + int partial);
23505 +static inline int qman_alloc_ceetm0_channel(u32 *result)
23506 +{
23507 + int ret = qman_alloc_ceetm0_channel_range(result, 1, 0, 0);
23508 + return (ret > 0) ? 0 : ret;
23509 +}
23510 +void qman_release_ceetm0_channel_range(u32 channelid, u32 count);
23511 +static inline void qman_release_ceetm0_channelid(u32 channelid)
23512 +{
23513 + qman_release_ceetm0_channel_range(channelid, 1);
23514 +}
23515 +
23516 +int qman_reserve_ceetm0_channel_range(u32 channelid, u32 count);
23517 +static inline int qman_reserve_ceetm0_channelid(u32 channelid)
23518 +{
23519 + return qman_reserve_ceetm0_channel_range(channelid, 1);
23520 +}
23521 +
23522 +void qman_seed_ceetm0_channel_range(u32 channelid, u32 count);
23523 +
23524 +
23525 +int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
23526 + int partial);
23527 +static inline int qman_alloc_ceetm1_channel(u32 *result)
23528 +{
23529 + int ret = qman_alloc_ceetm1_channel_range(result, 1, 0, 0);
23530 + return (ret > 0) ? 0 : ret;
23531 +}
23532 +void qman_release_ceetm1_channel_range(u32 channelid, u32 count);
23533 +static inline void qman_release_ceetm1_channelid(u32 channelid)
23534 +{
23535 + qman_release_ceetm1_channel_range(channelid, 1);
23536 +}
23537 +int qman_reserve_ceetm1_channel_range(u32 channelid, u32 count);
23538 +static inline int qman_reserve_ceetm1_channelid(u32 channelid)
23539 +{
23540 + return qman_reserve_ceetm1_channel_range(channelid, 1);
23541 +}
23542 +
23543 +void qman_seed_ceetm1_channel_range(u32 channelid, u32 count);
23544 +
23545 +
23546 +int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
23547 + int partial);
23548 +static inline int qman_alloc_ceetm0_lfqid(u32 *result)
23549 +{
23550 + int ret = qman_alloc_ceetm0_lfqid_range(result, 1, 0, 0);
23551 + return (ret > 0) ? 0 : ret;
23552 +}
23553 +void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count);
23554 +static inline void qman_release_ceetm0_lfqid(u32 lfqid)
23555 +{
23556 + qman_release_ceetm0_lfqid_range(lfqid, 1);
23557 +}
23558 +int qman_reserve_ceetm0_lfqid_range(u32 lfqid, u32 count);
23559 +static inline int qman_reserve_ceetm0_lfqid(u32 lfqid)
23560 +{
23561 + return qman_reserve_ceetm0_lfqid_range(lfqid, 1);
23562 +}
23563 +
23564 +void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count);
23565 +
23566 +
23567 +int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
23568 + int partial);
23569 +static inline int qman_alloc_ceetm1_lfqid(u32 *result)
23570 +{
23571 + int ret = qman_alloc_ceetm1_lfqid_range(result, 1, 0, 0);
23572 + return (ret > 0) ? 0 : ret;
23573 +}
23574 +void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count);
23575 +static inline void qman_release_ceetm1_lfqid(u32 lfqid)
23576 +{
23577 + qman_release_ceetm1_lfqid_range(lfqid, 1);
23578 +}
23579 +int qman_reserve_ceetm1_lfqid_range(u32 lfqid, u32 count);
23580 +static inline int qman_reserve_ceetm1_lfqid(u32 lfqid)
23581 +{
23582 + return qman_reserve_ceetm1_lfqid_range(lfqid, 1);
23583 +}
23584 +
23585 +void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count);
23586 +
23587 +
23588 + /* ----------------------------- */
23589 + /* CEETM :: sub-portals */
23590 + /* ----------------------------- */
23591 +
23592 +/**
23593 + * qman_ceetm_sp_claim - Claims the given sub-portal, provided it is available
23594 + * to us and configured for traffic-management.
23595 + * @sp: the returned sub-portal object, if successful.
23596 + * @dcp_idx: specifies the desired Fman block (and thus the relevant CEETM
23597 + * instance).
23598 + * @sp_idx: the desired sub-portal index, from 0 to 15.
23599 + *
23600 + * Returns zero for success, or -ENODEV if the sub-portal is in use, or -EINVAL
23601 + * if the sp_idx is out of range.
23602 + *
23603 + * Note that if there are multiple driver domains (eg. a linux kernel versus
23604 + * user-space drivers in USDPAA, or multiple guests running under a hypervisor)
23605 + * then a sub-portal may be accessible by more than one instance of a qman
23606 + * driver and so it may be claimed multiple times. If this is the case, it is
23607 + * up to the system architect to prevent conflicting configuration actions
23608 + * coming from the different driver domains. The qman drivers do not have any
23609 + * behind-the-scenes coordination to prevent this from happening.
23610 + */
23611 +int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp,
23612 + enum qm_dc_portal dcp_idx,
23613 + unsigned int sp_idx);
23614 +
23615 +/**
23616 + * qman_ceetm_sp_release - Releases a previously claimed sub-portal.
23617 + * @sp: the sub-portal to be released.
23618 + *
23619 + * Returns 0 for success, or -EBUSY for failure if the dependencies are not
23620 + * released.
23621 + */
23622 +int qman_ceetm_sp_release(struct qm_ceetm_sp *sp);
23623 +
23624 + /* ----------------------------------- */
23625 + /* CEETM :: logical network interfaces */
23626 + /* ----------------------------------- */
23627 +
23628 +/**
23629 + * qman_ceetm_lni_claim - Claims an unclaimed LNI.
23630 + * @lni: the returned LNI object, if successful.
23631 + * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
23632 + * instance)
23633 + * @lni_idx: is the desired LNI index.
23634 + *
23635 + * Returns zero for success, or -EINVAL on failure, which will happen if the LNI
23636 + * is not available or has already been claimed (and not yet successfully
23637 + * released), or lni_idx is out of range.
23638 + *
23639 + * Note that there may be multiple driver domains (or instances) that need to
23640 + * transmit out the same LNI, so this claim is only guaranteeing exclusivity
23641 + * within the domain of the driver being called. See qman_ceetm_sp_claim() and
23642 + * qman_ceetm_sp_get_lni() for more information.
23643 + */
23644 +int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni,
23645 + enum qm_dc_portal dcp_id,
23646 + unsigned int lni_idx);
23647 +
23648 +/**
23649 + * qman_ceetm_lni_release - Releases a previously claimed LNI.
23650 + * @lni: the LNI to be released.
23651 + *
23652 + * This will only succeed if all dependent objects have been released.
23653 + * Returns zero for success, or -EBUSY if the dependencies are not released.
23654 + */
23655 +int qman_ceetm_lni_release(struct qm_ceetm_lni *lni);
23656 +
23657 +/**
23658 + * qman_ceetm_sp_set_lni
23659 + * qman_ceetm_sp_get_lni - Set/get the LNI that the sub-portal is currently
23660 + * mapped to.
23661 + * @sp: the given sub-portal.
23662 + * @lni (in the "set" function): the LNI object which the sp will be mapped to.
23663 + * @lni_idx (in the "get" function): the LNI index which the sp is mapped to.
23664 + *
23665 + * Returns zero for success. The "set" function returns -EINVAL when this
23666 + * sp-lni mapping has already been set or the configure mapping command returns
23667 + * an error; the "get" function returns -EINVAL when this sp-lni mapping is not
23668 + * set or the query mapping command returns an error.
23669 + *
23670 + * This may be useful in situations where multiple driver domains have access
23671 + * to the same sub-portals in order to all be able to transmit out the same
23672 + * physical interface (perhaps they're on different IP addresses or VPNs, so
23673 + * Fman is splitting Rx traffic and here we need to converge Tx traffic). In
23674 + * that case, a control-plane is likely to use qman_ceetm_lni_claim() followed
23675 + * by qman_ceetm_sp_set_lni() to configure the sub-portal, and other domains
23676 + * are likely to use qman_ceetm_sp_get_lni() followed by qman_ceetm_lni_claim()
23677 + * in order to determine the LNI that the control-plane had assigned. This is
23678 + * why the "get" returns an index, whereas the "set" takes an (already claimed)
23679 + * LNI object.
23680 + */
23681 +int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp,
23682 + struct qm_ceetm_lni *lni);
23683 +int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp,
23684 + unsigned int *lni_idx);
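A minimal sketch of the claim-and-map sequence described above, assuming qm_dc_portal_fman0 is one of the enum qm_dc_portal values defined earlier in this header; the sub-portal and LNI indices are placeholders:

static int example_ceetm_map_sp_to_lni(struct qm_ceetm_sp **sp,
				       struct qm_ceetm_lni **lni)
{
	int err;

	/* claim sub-portal 0 and LNI 0 on the FMan 0 CEETM instance */
	err = qman_ceetm_sp_claim(sp, qm_dc_portal_fman0, 0);
	if (err)
		return err;
	err = qman_ceetm_lni_claim(lni, qm_dc_portal_fman0, 0);
	if (err)
		return err;
	/* bind the sub-portal to the claimed LNI */
	return qman_ceetm_sp_set_lni(*sp, *lni);
}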
23685 +
23686 +/**
23687 + * qman_ceetm_lni_enable_shaper
23688 + * qman_ceetm_lni_disable_shaper - Enables/disables shaping on the LNI.
23689 + * @lni: the given LNI.
23690 + * @coupled: indicates whether CR and ER are coupled.
23691 + * @oal: the overhead accounting length which is added to the actual length of
23692 + * each frame when performing shaper calculations.
23693 + *
23694 + * When the number of (unused) committed-rate tokens reaches the committed-rate
23695 + * token limit, 'coupled' indicates whether surplus tokens should be added to
23696 + * the excess-rate token count (up to the excess-rate token limit).
23697 + * When an LNI is claimed, the shaper is disabled by default. The enable
23698 + * function turns on the shaper for that LNI.
23699 + * Whenever a claimed LNI is first enabled for shaping, its committed and
23700 + * excess token rates and limits are zero, so will need to be changed to do
23701 + * anything useful. The shaper can subsequently be enabled/disabled without
23702 + * resetting the shaping parameters, but the shaping parameters will be reset
23703 + * when the LNI is released.
23704 + *
23705 + * Returns zero for success. The "enable" function returns:
23706 + * a) -EINVAL if the shaper is already enabled,
23707 + * b) -EIO if the configure shaper command returns an error.
23708 + * The "disable" function returns:
23709 + * a) -EINVAL if the shaper has already been disabled,
23710 + * b) -EIO if the configure shaper command returns an error.
23711 + */
23712 +int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
23713 + int oal);
23714 +int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni);
23715 +
23716 +/**
23717 + * qman_ceetm_lni_is_shaper_enabled - Check LNI shaper status
23718 + * @lni: the given LNI.
23719 + */
23720 +int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni);
23721 +
23722 +/**
23723 + * qman_ceetm_lni_set_commit_rate
23724 + * qman_ceetm_lni_get_commit_rate
23725 + * qman_ceetm_lni_set_excess_rate
23726 + * qman_ceetm_lni_get_excess_rate - Set/get the shaper CR/ER token rate and
23727 + * token limit for the given LNI.
23728 + * @lni: the given LNI.
23729 + * @token_rate: the desired token rate for the "set" function, or the token rate of
23730 + * the LNI queried by "get" function.
23731 + * @token_limit: the desired token bucket limit for "set" function, or the token
23732 + * limit of the given LNI queried by "get" function.
23733 + *
23734 + * Returns zero for success. The "set" function returns -EINVAL if the given
23735 + * LNI is unshaped or -EIO if the configure shaper command returns an error.
23736 + * The "get" function returns -EINVAL if the token rate or the token limit is
23737 + * not set or the query command returns error.
23738 + */
23739 +int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
23740 + const struct qm_ceetm_rate *token_rate,
23741 + u16 token_limit);
23742 +int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
23743 + struct qm_ceetm_rate *token_rate,
23744 + u16 *token_limit);
23745 +int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
23746 + const struct qm_ceetm_rate *token_rate,
23747 + u16 token_limit);
23748 +int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
23749 + struct qm_ceetm_rate *token_rate,
23750 + u16 *token_limit);
23751 +/**
23752 + * qman_ceetm_lni_set_commit_rate_bps
23753 + * qman_ceetm_lni_get_commit_rate_bps
23754 + * qman_ceetm_lni_set_excess_rate_bps
23755 + * qman_ceetm_lni_get_excess_rate_bps - Set/get the shaper CR/ER rate
23756 + * and token limit for the given LNI.
23757 + * @lni: the given LNI.
23758 + * @bps: the desired shaping rate in bps for the "set" function, or the shaping rate
23759 + * of the LNI queried by "get" function.
23760 + * @token_limit: the desired token bucket limit for "set" function, or the token
23761 + * limit of the given LNI queried by "get" function.
23762 + *
23763 + * Returns zero for success. The "set" function returns -EINVAL if the given
23764 + * LNI is unshaped or -EIO if the configure shaper command returns an error.
23765 + * The "get" function returns -EINVAL if the token rate or the token limit is
23766 + * not set or the query command returns error.
23767 + */
23768 +int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
23769 + u64 bps,
23770 + u16 token_limit);
23771 +int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
23772 + u64 *bps, u16 *token_limit);
23773 +int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
23774 + u64 bps,
23775 + u16 token_limit);
23776 +int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
23777 + u64 *bps, u16 *token_limit);
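Continuing the sketch above, shaping can be enabled on a claimed LNI and a committed rate programmed in bps; the coupling flag, overhead accounting length, rate and token limit below are illustrative values only:

static int example_lni_shaping(struct qm_ceetm_lni *lni)
{
	int err;

	/* enable the shaper with CR/ER coupling and a 24-byte overhead */
	err = qman_ceetm_lni_enable_shaper(lni, 1, 24);
	if (err)
		return err;
	/* commit roughly 1 Gbit/s with an arbitrary token limit */
	return qman_ceetm_lni_set_commit_rate_bps(lni, 1000000000ULL, 0x40);
}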
23778 +
23779 +/**
23780 + * qman_ceetm_lni_set_tcfcc
23781 + * qman_ceetm_lni_get_tcfcc - Configure/query "Traffic Class Flow Control".
23782 + * @lni: the given LNI.
23783 + * @cq_level: is between 0 and 15, representing individual class queue levels
23784 + * (CQ0 to CQ7 for every channel) and grouped class queue levels (CQ8 to CQ15
23785 + * for every channel).
23786 + * @traffic_class: is between 0 and 7 when associating a given class queue level
23787 + * to a traffic class, or -1 when disabling traffic class flow control for this
23788 + * class queue level.
23789 + *
23790 + * Return zero for success, or -EINVAL if the cq_level or traffic_class is out
23791 + * of range as indicated above, or -EIO if the configure/query tcfcc command
23792 + * returns error.
23793 + *
23794 + * Refer to the section of QMan CEETM traffic class flow control in the
23795 + * Reference Manual.
23796 + */
23797 +int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
23798 + unsigned int cq_level,
23799 + int traffic_class);
23800 +int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni,
23801 + unsigned int cq_level,
23802 + int *traffic_class);
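For example, a driver might tie class queue level 0 to traffic class 0 and leave a grouped level uncontrolled, along the lines of this sketch:

static int example_lni_tcfcc(struct qm_ceetm_lni *lni)
{
	int err;

	/* CQ level 0 participates in flow control as traffic class 0 */
	err = qman_ceetm_lni_set_tcfcc(lni, 0, 0);
	if (err)
		return err;
	/* disable traffic class flow control for grouped level 8 */
	return qman_ceetm_lni_set_tcfcc(lni, 8, -1);
}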
23803 +
23804 + /* ----------------------------- */
23805 + /* CEETM :: class queue channels */
23806 + /* ----------------------------- */
23807 +
23808 +/**
23809 + * qman_ceetm_channel_claim - Claims an unclaimed CQ channel that is mapped to
23810 + * the given LNI.
23811 + * @channel: the returned class queue channel object, if successful.
23812 + * @lni: the LNI that the channel belongs to.
23813 + *
23814 + * Channels are always initially "unshaped".
23815 + *
23816 + * Return zero for success, or -ENODEV if there is no channel available (all 32
23817 + * channels are claimed) or -EINVAL if the channel mapping command returns
23818 + * error.
23819 + */
23820 +int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
23821 + struct qm_ceetm_lni *lni);
23822 +
23823 +/**
23824 + * qman_ceetm_channel_release - Releases a previously claimed CQ channel.
23825 + * @channel: the channel to be released.
23826 + *
23827 + * Returns zero for success, or -EBUSY if the dependencies are still in use.
23828 + *
23829 + * Note any shaping of the channel will be cleared to leave it in an unshaped
23830 + * state.
23831 + */
23832 +int qman_ceetm_channel_release(struct qm_ceetm_channel *channel);
23833 +
23834 +/**
23835 + * qman_ceetm_channel_enable_shaper
23836 + * qman_ceetm_channel_disable_shaper - Enables/disables shaping on the channel.
23837 + * @channel: the given channel.
23838 + * @coupled: indicates whether surplus CR tokens should be added to the
23839 + * excess-rate token count (up to the excess-rate token limit) when the number
23840 + * of (unused) committed-rate tokens reaches the committed-rate token limit.
23841 + *
23842 + * Whenever a claimed channel is first enabled for shaping, its committed and
23843 + * excess token rates and limits are zero, so will need to be changed to do
23844 + * anything useful. The shaper can subsequently be enabled/disabled without
23845 + * resetting the shaping parameters, but the shaping parameters will be reset
23846 + * when the channel is released.
23847 + *
23848 + * Return 0 for success, or -EINVAL for failure if the channel shaper has
23849 + * already been enabled/disabled or the management command returns an error.
23850 + */
23851 +int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
23852 + int coupled);
23853 +int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel);
23854 +
23855 +/**
23856 + * qman_ceetm_channel_is_shaper_enabled - Check channel shaper status.
23857 + * @channel: the given channel.
23858 + */
23859 +int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel);
23860 +
23861 +/**
23862 + * qman_ceetm_channel_set_commit_rate
23863 + * qman_ceetm_channel_get_commit_rate
23864 + * qman_ceetm_channel_set_excess_rate
23865 + * qman_ceetm_channel_get_excess_rate - Set/get channel CR/ER shaper parameters.
23866 + * @channel: the given channel.
23867 + * @token_rate: the desired token rate for "set" function, or the queried token
23868 + * rate for "get" function.
23869 + * @token_limit: the desired token limit for "set" function, or the queried
23870 + * token limit for "get" function.
23871 + *
23872 + * Return zero for success. The "set" function returns -EINVAL if the channel
23873 + * is unshaped, or -EIO if the configure shaper command returns an error. The
23874 + * "get" function returns -EINVAL if the token rate or token limit is not set,
23875 + * or the query shaper command returns an error.
23876 + */
23877 +int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
23878 + const struct qm_ceetm_rate *token_rate,
23879 + u16 token_limit);
23880 +int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
23881 + struct qm_ceetm_rate *token_rate,
23882 + u16 *token_limit);
23883 +int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
23884 + const struct qm_ceetm_rate *token_rate,
23885 + u16 token_limit);
23886 +int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
23887 + struct qm_ceetm_rate *token_rate,
23888 + u16 *token_limit);
23889 +/**
23890 + * qman_ceetm_channel_set_commit_rate_bps
23891 + * qman_ceetm_channel_get_commit_rate_bps
23892 + * qman_ceetm_channel_set_excess_rate_bps
23893 + * qman_ceetm_channel_get_excess_rate_bps - Set/get channel CR/ER shaper
23894 + * parameters.
23895 + * @channel: the given channel.
23896 + * @bps: the desired shaper rate in bps for the "set" function, or the queried
23897 + * shaper rate in bps for the "get" function.
23898 + * @token_limit: the desired token limit for "set" function, or the queried
23899 + * token limit for "get" function.
23900 + *
23901 + * Return zero for success. The "set" function returns -EINVAL if the channel
23902 + * is unshaped, or -EIO if the configure shaper command returns an error. The
23903 + * "get" function returns -EINVAL if the token rate or token limit is not set,
23904 + * or the query shaper command returns an error.
23905 + */
23906 +int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
23907 + u64 bps, u16 token_limit);
23908 +int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
23909 + u64 *bps, u16 *token_limit);
23910 +int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
23911 + u64 bps, u16 token_limit);
23912 +int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
23913 + u64 *bps, u16 *token_limit);
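A sketch of claiming a channel on an LNI and shaping it by rate in bps; the 500 Mbit/s rate and token limit are illustrative:

static int example_channel_shaping(struct qm_ceetm_lni *lni,
				   struct qm_ceetm_channel **channel)
{
	int err;

	err = qman_ceetm_channel_claim(channel, lni);
	if (err)
		return err;
	/* couple CR and ER, then commit roughly 500 Mbit/s */
	err = qman_ceetm_channel_enable_shaper(*channel, 1);
	if (err)
		return err;
	return qman_ceetm_channel_set_commit_rate_bps(*channel, 500000000ULL,
						      0x40);
}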
23914 +
23915 +/**
23916 + * qman_ceetm_channel_set_weight
23917 + * qman_ceetm_channel_get_weight - Set/get the weight for unshaped channel
23918 + * @channel: the given channel.
23919 + * @token_limit: the desired token limit as the weight of the unshaped channel
23920 + * for "set" function, or the queried token limit for "get" function.
23921 + *
23922 + * The algorithm of unshaped fair queuing (uFQ) is used for unshaped channels.
23923 + * It allows the unshaped channels to be included in the CR time eligible list,
23924 + * and thus use the configured CR token limit value as their fair queuing
23925 + * weight.
23926 + *
23927 + * Return zero for success, or -EINVAL if the channel is a shaped channel or
23928 + * the management command returns error.
23929 + */
23930 +int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
23931 + u16 token_limit);
23932 +int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
23933 + u16 *token_limit);
23934 +
23935 +/**
23936 + * qman_ceetm_channel_set_group
23937 + * qman_ceetm_channel_get_group - Set/get the grouping of the class scheduler.
23938 + * @channel: the given channel.
23939 + * @group_b: indicates whether there is group B in this channel.
23940 + * @prio_a: the priority of group A.
23941 + * @prio_b: the priority of group B.
23942 + *
23943 + * There are 8 individual class queues (CQ0-CQ7), and 8 grouped class queues
23944 + * (CQ8-CQ15). If 'group_b' is zero, then all the grouped class queues are in
23945 + * group A, otherwise they are split into group A (CQ8-11) and group B
23946 + * (CQ12-C15). The individual class queues and the group(s) are in strict
23947 + * priority order relative to each other. Within the group(s), the scheduling
23948 + * is not strict priority order, but the result of scheduling within a group
23949 + * is in strict priority order relative to the other class queues in the
23950 + * channel. 'prio_a' and 'prio_b' control the priority order of the groups
23951 + * relative to the individual class queues, and take values from 0-7. Eg. if
23952 + * 'group_b' is non-zero, 'prio_a' is 2 and 'prio_b' is 6, then the strict
23953 + * priority order would be;
23954 + * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7
23955 + *
23956 + * Return 0 for success. For "set" function, returns -EINVAL if prio_a or
23957 + * prio_b are out of the range 0 - 7 (priority of group A or group B can not
23958 + * be 0, CQ0 is always the highest class queue in this channel.), or -EIO if
23959 + * the configure scheduler command returns error. For "get" function, return
23960 + * -EINVAL if the query scheduler command returns error.
23961 + */
23962 +int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel,
23963 + int group_b,
23964 + unsigned int prio_a,
23965 + unsigned int prio_b);
23966 +int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel,
23967 + int *group_b,
23968 + unsigned int *prio_a,
23969 + unsigned int *prio_b);
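Reusing the example from the comment above (group B present, prio_a = 2, prio_b = 6), the scheduler grouping could be programmed as in this sketch:

static int example_channel_grouping(struct qm_ceetm_channel *channel)
{
	/* strict priority order: CQ0, CQ1, CQ2, GROUPA, CQ3..CQ6, GROUPB, CQ7 */
	return qman_ceetm_channel_set_group(channel, 1, 2, 6);
}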
23970 +
23971 +/**
23972 + * qman_ceetm_channel_set_group_cr_eligibility
23973 + * qman_ceetm_channel_set_group_er_eligibility - Set channel group eligibility
23974 + * @channel: the given channel object
23975 + * @group_b: indicates whether there is group B in this channel.
23976 + * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
23977 + *
23978 + * Return zero for success, or -EINVAL if eligibility setting fails.
23979 +*/
23980 +int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
23981 + *channel, int group_b, int cre);
23982 +int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
23983 + *channel, int group_b, int ere);
23984 +
23985 +/**
23986 + * qman_ceetm_channel_set_cq_cr_eligibility
23987 + * qman_ceetm_channel_set_cq_er_eligibility - Set channel cq eligibility
23988 + * @channel: the given channel object
23989 + * @idx: is from 0 to 7 (representing CQ0 to CQ7).
23990 + * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
23991 + *
23992 + * Return zero for success, or -EINVAL if eligibility setting fails.
23993 +*/
23994 +int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
23995 + unsigned int idx, int cre);
23996 +int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
23997 + unsigned int idx, int ere);
23998 +
23999 + /* --------------------- */
24000 + /* CEETM :: class queues */
24001 + /* --------------------- */
24002 +
24003 +/**
24004 + * qman_ceetm_cq_claim - Claims an individual class queue.
24005 + * @cq: the returned class queue object, if successful.
24006 + * @channel: the class queue channel.
24007 + * @idx: is from 0 to 7 (representing CQ0 to CQ7).
24008 + * @ccg: represents the class congestion group that this class queue should be
24009 + * subscribed to, or NULL if no congestion group membership is desired.
24010 + *
24011 + * Returns zero for success, or -EINVAL if @idx is out of the range 0 - 7, if
24012 + * this class queue has already been claimed, or if the configure class queue
24013 + * command returns an error; returns -ENOMEM if allocating CQ memory fails.
24014 + */
24015 +int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
24016 + struct qm_ceetm_channel *channel,
24017 + unsigned int idx,
24018 + struct qm_ceetm_ccg *ccg);
24019 +
24020 +/**
24021 + * qman_ceetm_cq_claim_A - Claims a class queue group A.
24022 + * @cq: the returned class queue object, if successful.
24023 + * @channel: the class queue channel.
24024 + * @idx: is from 8 to 15 if only group A exists, otherwise it is from 8 to 11.
24025 + * @ccg: represents the class congestion group that this class queue should be
24026 + * subscribed to, or NULL if no congestion group membership is desired.
24027 + *
24028 + * Return zero for success, or -EINVAL if @idx is out of range, if this class
24029 + * queue has already been claimed, or if the configure class queue command
24030 + * returns an error; returns -ENOMEM if allocating CQ memory fails.
24031 + */
24032 +int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
24033 + struct qm_ceetm_channel *channel,
24034 + unsigned int idx,
24035 + struct qm_ceetm_ccg *ccg);
24036 +
24037 +/**
24038 + * qman_ceetm_cq_claim_B - Claims a class queue group B.
24039 + * @cq: the returned class queue object, if successful.
24040 + * @channel: the class queue channel.
24041 + * @idx: is from 0 to 3 (CQ12 to CQ15).
24042 + * @ccg: represents the class congestion group that this class queue should be
24043 + * subscribed to, or NULL if no congestion group membership is desired.
24044 + *
24045 + * Return zero for success, or -EINVAL if @idx is out of range, if this class
24046 + * queue has already been claimed, or if the configure class queue command
24047 + * returns an error; returns -ENOMEM if allocating CQ memory fails.
24048 + */
24049 +int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
24050 + struct qm_ceetm_channel *channel,
24051 + unsigned int idx,
24052 + struct qm_ceetm_ccg *ccg);
24053 +
24054 +/**
24055 + * qman_ceetm_cq_release - Releases a previously claimed class queue.
24056 + * @cq: The class queue to be released.
24057 + *
24058 + * Return zero for success, or -EBUSY if the dependent objects (eg. logical
24059 + * FQIDs) have not been released.
24060 + */
24061 +int qman_ceetm_cq_release(struct qm_ceetm_cq *cq);
24062 +
24063 +/**
24064 + * qman_ceetm_set_queue_weight
24065 + * qman_ceetm_get_queue_weight - Configure/query the weight of a grouped class
24066 + * queue.
24067 + * @cq: the given class queue.
24068 + * @weight_code: the desired weight code to set for the given class queue for
24069 + * "set" function or the queired weight code for "get" function.
24070 + *
24071 + * Grouped class queues have a default weight code of zero, which corresponds to
24072 + * a scheduler weighting of 1. This function can be used to modify a grouped
24073 + * class queue to another weight. (Use the helpers qman_ceetm_wbfs2ratio()
24074 + * and qman_ceetm_ratio2wbfs() to convert between these 'weight_code' values
24075 + * and the corresponding sharing weight.)
24076 + *
24077 + * Returns zero for success, or -EIO if the configure weight command returns
24078 + * error for "set" function, or -EINVAL if the query command returns
24079 + * error for "get" function.
24080 + * See the section "CEETM Weighted Scheduling among Grouped Classes" in the
24081 + * Reference Manual for details on weights and weight codes.
24082 + */
24083 +int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
24084 + struct qm_ceetm_weight_code *weight_code);
24085 +int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
24086 + struct qm_ceetm_weight_code *weight_code);
24087 +
24088 +/**
24089 + * qman_ceetm_set_queue_weight_in_ratio
24090 + * qman_ceetm_get_queue_weight_in_ratio - Configure/query the weight of a
24091 + * grouped class queue.
24092 + * @cq: the given class queue.
24093 + * @ratio: the weight expressed as a ratio. It should be the real ratio number
24094 + * multiplied by 100 to remove the fractional part.
24095 + *
24096 + * Returns zero for success, or -EIO if the configure weight command returns
24097 + * error for "set" function, or -EINVAL if the query command returns
24098 + * error for "get" function.
24099 + */
24100 +int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio);
24101 +int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio);
24102 +
24103 +/* Weights are encoded using a pseudo-exponential scheme. The weight codes 0,
24104 + * 32, 64, [...] correspond to weights of 1, 2, 4, [...]. The weights
24105 + * corresponding to intermediate weight codes are calculated using linear
24106 + * interpolation on the inverted values. Or put another way, the inverse weights
24107 + * for each 32nd weight code are 1, 1/2, 1/4, [...], and so the intervals
24108 + * between these are divided linearly into 32 intermediate values, the inverses
24109 + * of which form the remaining weight codes.
24110 + *
24111 + * The Weighted Bandwidth Fair Scheduling (WBFS) algorithm provides a form of
24112 + * scheduling within a group of class queues (group A or B). Weights are used to
24113 + * normalise the class queues to an underlying BFS algorithm where all class
24114 + * queues are assumed to require "equal bandwidth". So the weights referred to
24115 + * by the weight codes act as divisors on the size of frames being enqueued. Ie.
24116 + * one class queue in a group is assigned a weight of 2 whilst the other class
24117 + * queues in the group keep the default weight of 1, then the WBFS scheduler
24118 + * will effectively treat all frames enqueued on the weight-2 class queue as
24119 + * having half the number of bytes they really have. Ie. if all other things are
24120 + * equal, that class queue would get twice as much bytes-per-second bandwidth as
24121 + * the others. So weights should be chosen to provide bandwidth ratios between
24122 + * members of the same class queue group. These weights have no bearing on
24123 + * behaviour outside that group's WBFS mechanism though.
24124 + */
24125 +
24126 +/**
24127 + * qman_ceetm_wbfs2ratio - Given a weight code ('wbfs'), an accurate fractional
24128 + * representation of the corresponding weight is given (in order to not lose
24129 + * any precision).
24130 + * @weight_code: The given weight code in WBFS.
24131 + * @numerator: the numerator part of the weight computed by the weight code.
24132 + * @denominator: the denominator part of the weight computed by the weight code
24133 + *
24134 + * Returns zero for success or -EINVAL if the given weight code is illegal.
24135 + */
24136 +int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
24137 + u32 *numerator,
24138 + u32 *denominator);
24139 +/**
24140 + * qman_ceetm_ratio2wbfs - Given a weight, find the nearest possible weight code
24141 + * If the user needs to know how close this is, convert the resulting weight
24142 + * code back to a weight and compare.
24143 + * @numerator: numerator part of the given weight.
24144 + * @denominator: denominator part of the given weight.
24145 + * @weight_code: the weight code computed from the given weight.
24146 + *
24147 + * Returns zero for success, or -ERANGE if "numerator/denominator" is outside
24148 + * the range of weights.
24149 + */
24150 +int qman_ceetm_ratio2wbfs(u32 numerator,
24151 + u32 denominator,
24152 + struct qm_ceetm_weight_code *weight_code,
24153 + int rounding);
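As a sketch, a 3:1 sharing weight could be converted to a weight code and applied to a grouped class queue; the rounding argument is passed as 0 here purely for illustration:

static int example_set_cq_weight(struct qm_ceetm_cq *cq)
{
	struct qm_ceetm_weight_code wc;
	int err;

	/* find the nearest weight code to a 3:1 ratio */
	err = qman_ceetm_ratio2wbfs(3, 1, &wc, 0);
	if (err)
		return err;
	return qman_ceetm_set_queue_weight(cq, &wc);
}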
24154 +
24155 +#define QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER 0x1
24156 +/**
24157 + * qman_ceetm_cq_get_dequeue_statistics - Get the statistics provided by CEETM
24158 + * CQ counters.
24159 + * @cq: the given CQ object.
24160 + * @flags: indicates whether the statistics counter will be cleared after query.
24161 + * @frame_count: the number of frames that have been counted since the
24162 + * counter was cleared last time.
24163 + * @byte_count: the number of bytes in all frames that have been counted.
24164 + *
24165 + * Return zero for success or -EINVAL if query statistics command returns error.
24166 + *
24167 + */
24168 +int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
24169 + u64 *frame_count, u64 *byte_count);
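A sketch of reading and clearing the dequeue counters of a class queue in one query:

static int example_cq_stats(struct qm_ceetm_cq *cq)
{
	u64 frames, bytes;

	/* read the counters and clear them in the same query */
	return qman_ceetm_cq_get_dequeue_statistics(cq,
			QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER,
			&frames, &bytes);
}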
24170 +
24171 +/**
24172 + * qman_ceetm_drain_cq - Drain the CQ until it is empty.
24173 + * @cq: the given CQ object.
24174 + * Return 0 for success, or -EINVAL if the command to empty the CQ fails.
24175 + */
24176 +int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq);
24177 +
24178 + /* ---------------------- */
24179 + /* CEETM :: logical FQIDs */
24180 + /* ---------------------- */
24181 +/**
24182 + * qman_ceetm_lfq_claim - Claims an unused logical FQID, associates it with
24183 + * the given class queue.
24184 + * @lfq: the returned lfq object, if successful.
24185 + * @cq: the class queue which needs to claim a LFQID.
24186 + *
24187 + * Return zero for success, or -ENODEV if no LFQID is available or -ENOMEM if
24188 + * allocating memory for lfq fails, or -EINVAL if configuring LFQMT fails.
24189 + */
24190 +int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
24191 + struct qm_ceetm_cq *cq);
24192 +
24193 +/**
24194 + * qman_ceetm_lfq_release - Releases a previously claimed logical FQID.
24195 + * @lfq: the lfq to be released.
24196 + *
24197 + * Return zero for success.
24198 + */
24199 +int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq);
24200 +
24201 +/**
24202 + * qman_ceetm_lfq_set_context
24203 + * qman_ceetm_lfq_get_context - Set/get the context_a/context_b pair to the
24204 + * "dequeue context table" associated with the logical FQID.
24205 + * @lfq: the given logical FQ object.
24206 + * @context_a: contextA of the dequeue context.
24207 + * @context_b: contextB of the dequeue context.
24208 + *
24209 + * Returns zero for success, or -EINVAL if there is an error setting/getting
24210 + * the context pair.
24211 + */
24212 +int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq,
24213 + u64 context_a,
24214 + u32 context_b);
24215 +int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq,
24216 + u64 *context_a,
24217 + u32 *context_b);
24218 +
24219 +/**
24220 + * qman_ceetm_create_fq - Initialise a FQ object for the LFQ.
24221 + * @lfq: the given logical FQ.
24222 + * @fq: the FQ object created for the given logical FQ.
24223 + *
24224 + * The FQ object can be used in qman_enqueue() and qman_enqueue_orp() APIs to
24225 + * target a logical FQID (and the class queue it is associated with).
24226 + * Note that this FQ object can only be used for enqueues, and
24227 + * in the case of qman_enqueue_orp() it can not be used as the 'orp' parameter,
24228 + * only as 'fq'. This FQ object can not (and shouldn't) be destroyed, it is only
24229 + * valid as long as the underlying 'lfq' remains claimed. It is the user's
24230 + * responsibility to ensure that the underlying 'lfq' is not released until any
24231 + * enqueues to this FQ object have completed. The only field the user needs to
24232 + * fill in is fq->cb.ern, as that enqueue rejection handler is the callback that
24233 + * could conceivably be called on this FQ object. This API can be called
24234 + * multiple times to create multiple FQ objects referring to the same logical
24235 + * FQID, and any enqueue rejections will respect the callback of the object that
24236 + * issued the enqueue (and will identify the object via the parameter passed to
24237 + * the callback too). There is no 'flags' parameter to this API as there is for
24238 + * qman_create_fq() - the created FQ object behaves as though qman_create_fq()
24239 + * had been called with the single flag QMAN_FQ_FLAG_NO_MODIFY.
24240 + *
24241 + * Returns 0 for success.
24242 + */
24243 +int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq);
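Pulling the class-queue and logical-FQID pieces together, a transmit path might be prepared roughly as in this sketch; the frame descriptor setup and the ern callback body are omitted, and qman_enqueue() is assumed to be the enqueue API declared earlier in this header:

static int example_ceetm_tx_setup(struct qm_ceetm_channel *channel,
				  struct qm_ceetm_lfq **lfq,
				  struct qman_fq *fq)
{
	struct qm_ceetm_cq *cq;
	int err;

	/* claim independent class queue 0 with no congestion group */
	err = qman_ceetm_cq_claim(&cq, channel, 0, NULL);
	if (err)
		return err;
	/* attach a logical FQID and build an enqueue-only FQ object */
	err = qman_ceetm_lfq_claim(lfq, cq);
	if (err)
		return err;
	err = qman_ceetm_create_fq(*lfq, fq);
	if (err)
		return err;
	/* fq->cb.ern must be set before enqueuing; frames then go out via
	 * qman_enqueue(fq, &fd, 0) targeting the logical FQID */
	return 0;
}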
24244 +
24245 + /* -------------------------------- */
24246 + /* CEETM :: class congestion groups */
24247 + /* -------------------------------- */
24248 +
24249 +/**
24250 + * qman_ceetm_ccg_claim - Claims an unused CCG.
24251 + * @ccg: the returned CCG object, if successful.
24252 + * @channel: the given class queue channel
24253 + * @cscn: the callback function of this CCG.
24254 + * @cb_ctx: the corresponding context to be used if state change
24255 + * notifications are later enabled for this CCG.
24256 + *
24257 + * The congestion group is local to the given class queue channel, so only
24258 + * class queues within the channel can be associated with that congestion group.
24259 + * The association of class queues to congestion groups occurs when the class
24260 + * queues are claimed, see qman_ceetm_cq_claim() and related functions.
24261 + * Congestion groups are in a "zero" state when initially claimed, and they are
24262 + * returned to that state when released.
24263 + *
24264 + * Return zero for success, or -EINVAL if no CCG in the channel is available.
24265 + */
24266 +int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
24267 + struct qm_ceetm_channel *channel,
24268 + unsigned int idx,
24269 + void (*cscn)(struct qm_ceetm_ccg *,
24270 + void *cb_ctx,
24271 + int congested),
24272 + void *cb_ctx);
24273 +
24274 +/**
24275 + * qman_ceetm_ccg_release - Releases a previously claimed CCG.
24276 + * @ccg: the given ccg.
24277 + *
24278 + * Returns zero for success, or -EBUSY if the given ccg's dependent objects
24279 + * (class queues that are associated with the CCG) have not been released.
24280 + */
24281 +int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg);
24282 +
24283 +/* This struct is used to specify attributes for a CCG. The 'we_mask' field
24284 + * controls which CCG attributes are to be updated, and the remainder specify
24285 + * the values for those attributes. A CCG counts either frames or the bytes
24286 + * within those frames, but not both ('mode'). A CCG can optionally cause
24287 + * enqueues to be rejected, due to tail-drop or WRED, or both (they are
24288 + * independent options, 'td_en' and 'wr_en_g,wr_en_y,wr_en_r'). Tail-drop can be
24289 + * level-triggered due to a single threshold ('td_thres') or edge-triggered due
24290 + * to a "congestion state", but not both ('td_mode'). Congestion state has
24291 + * distinct entry and exit thresholds ('cs_thres_in' and 'cs_thres_out'), and
24292 + * notifications can be sent to software when the CCG goes into and out of this
24293 + * congested state ('cscn_en'). */
24294 +struct qm_ceetm_ccg_params {
24295 + /* Boolean fields together in a single bitfield struct */
24296 + struct {
24297 + /* Whether to count bytes or frames. 1==frames */
24298 + u8 mode:1;
24299 + /* En/disable tail-drop. 1==enable */
24300 + u8 td_en:1;
24301 + /* Tail-drop on congestion-state or threshold. 1=threshold */
24302 + u8 td_mode:1;
24303 + /* Generate congestion state change notifications. 1==enable */
24304 + u8 cscn_en:1;
24305 + /* Enable WRED rejections (per colour). 1==enable */
24306 + u8 wr_en_g:1;
24307 + u8 wr_en_y:1;
24308 + u8 wr_en_r:1;
24309 + } __packed;
24310 + /* Tail-drop threshold. See qm_cgr_thres_[gs]et64(). */
24311 + struct qm_cgr_cs_thres td_thres;
24312 + /* Congestion state thresholds, for entry and exit. */
24313 + struct qm_cgr_cs_thres cs_thres_in;
24314 + struct qm_cgr_cs_thres cs_thres_out;
24315 + /* Overhead accounting length. Per-packet "tax", from -128 to +127 */
24316 + signed char oal;
24317 + /* Congestion state change notification for DCP portal, virtual CCGID*/
24318 + /* WRED parameters. */
24319 + struct qm_cgr_wr_parm wr_parm_g;
24320 + struct qm_cgr_wr_parm wr_parm_y;
24321 + struct qm_cgr_wr_parm wr_parm_r;
24322 +};
24323 +/* Bits used in 'we_mask' to qman_ceetm_ccg_set(), controls which attributes of
24324 + * the CCGR are to be updated. */
24325 +#define QM_CCGR_WE_MODE 0x0001 /* mode (bytes/frames) */
24326 +#define QM_CCGR_WE_CS_THRES_IN 0x0002 /* congestion state entry threshold */
24327 +#define QM_CCGR_WE_TD_EN 0x0004 /* congestion state tail-drop enable */
24328 +#define QM_CCGR_WE_CSCN_TUPD 0x0008 /* CSCN target update */
24329 +#define QM_CCGR_WE_CSCN_EN 0x0010 /* congestion notification enable */
24330 +#define QM_CCGR_WE_WR_EN_R 0x0020 /* WRED enable - red */
24331 +#define QM_CCGR_WE_WR_EN_Y 0x0040 /* WRED enable - yellow */
24332 +#define QM_CCGR_WE_WR_EN_G 0x0080 /* WRED enable - green */
24333 +#define QM_CCGR_WE_WR_PARM_R 0x0100 /* WRED parameters - red */
24334 +#define QM_CCGR_WE_WR_PARM_Y 0x0200 /* WRED parameters - yellow */
24335 +#define QM_CCGR_WE_WR_PARM_G 0x0400 /* WRED parameters - green */
24336 +#define QM_CCGR_WE_OAL 0x0800 /* overhead accounting length */
24337 +#define QM_CCGR_WE_CS_THRES_OUT 0x1000 /* congestion state exit threshold */
24338 +#define QM_CCGR_WE_TD_THRES 0x2000 /* tail-drop threshold */
24339 +#define QM_CCGR_WE_TD_MODE 0x4000 /* tail-drop mode (state/threshold) */
24340 +#define QM_CCGR_WE_CDV 0x8000 /* cdv */
24341 +
24342 +/**
24343 + * qman_ceetm_ccg_set
24344 + * qman_ceetm_ccg_get - Configure/query a subset of CCG attributes.
24345 + * @ccg: the given CCG object.
24346 + * @we_mask: the write enable mask.
24347 + * @params: the parameter settings for this CCG.
24348 + *
24349 + * Return 0 for success, or -EIO if the configure ccg command returns an error
24350 + * for the "set" function, or -EINVAL if the query ccg command returns an error
24351 + * for the "get" function.
24352 + */
24353 +int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg,
24354 + u16 we_mask,
24355 + const struct qm_ceetm_ccg_params *params);
24356 +int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
24357 + struct qm_ceetm_ccg_params *params);
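A sketch of claiming a CCG on a channel and switching it to frame counting with congestion notifications enabled; the CCG index and the callback are placeholders:

static void example_cscn_cb(struct qm_ceetm_ccg *ccg, void *cb_ctx,
			    int congested)
{
	/* react to entering/leaving the congested state */
}

static int example_ccg_setup(struct qm_ceetm_channel *channel)
{
	struct qm_ceetm_ccg *ccg;
	struct qm_ceetm_ccg_params params = {};
	int err;

	err = qman_ceetm_ccg_claim(&ccg, channel, 0, example_cscn_cb, NULL);
	if (err)
		return err;
	params.mode = 1;	/* count frames rather than bytes */
	params.cscn_en = 1;	/* generate state change notifications */
	return qman_ceetm_ccg_set(ccg, QM_CCGR_WE_MODE | QM_CCGR_WE_CSCN_EN,
				  &params);
}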
24358 +
24359 +/** qman_ceetm_cscn_swp_set - Add or remove a software portal from the target
24360 + * mask.
24361 + * qman_ceetm_cscn_swp_get - Query whether a given software portal index is
24362 + * in the cscn target mask.
24363 + * @ccg: the given CCG object.
24364 + * @swp_idx: the index of the software portal.
24365 + * @cscn_enabled: 1: Set the swp to be cscn target. 0: remove the swp from
24366 + * the target mask.
24367 + * @we_mask: the write enable mask.
24368 + * @params: the parameter settings for this CCG.
24369 + *
24370 + * Return 0 for success, or -EINVAL if command in set/get function fails.
24371 + */
24372 +int qman_ceetm_cscn_swp_set(struct qm_ceetm_ccg *ccg,
24373 + u16 swp_idx,
24374 + unsigned int cscn_enabled,
24375 + u16 we_mask,
24376 + const struct qm_ceetm_ccg_params *params);
24377 +int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
24378 + u16 swp_idx,
24379 + unsigned int *cscn_enabled);
24380 +
24381 +/** qman_ceetm_cscn_dcp_set - Add or remove a direct connect portal from the
24382 + * target mask.
24383 + * qman_ceetm_cscn_dcp_get - Query whether a given direct connect portal index
24384 + * is in the cscn target mask.
24385 + * @ccg: the given CCG object.
24386 + * @dcp_idx: the index of the direct connect portal.
24387 + * @vcgid: congestion state change notification for dcp portal, virtual CGID.
24388 + * @cscn_enabled: 1: Set the dcp to be cscn target. 0: remove the dcp from
24389 + * the target mask.
24390 + * @we_mask: the write enable mask.
24391 + * @params: the parameter settings for this CCG.
24392 + *
24393 + * Return 0 for success, or -EINVAL if command in set/get function fails.
24394 + */
24395 +int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
24396 + u16 dcp_idx,
24397 + u8 vcgid,
24398 + unsigned int cscn_enabled,
24399 + u16 we_mask,
24400 + const struct qm_ceetm_ccg_params *params);
24401 +int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
24402 + u16 dcp_idx,
24403 + u8 *vcgid,
24404 + unsigned int *cscn_enabled);
24405 +
24406 +/**
24407 + * qman_ceetm_ccg_get_reject_statistics - Get the statistics provided by
24408 + * CEETM CCG counters.
24409 + * @ccg: the given CCG object.
24410 + * @flags: indicates whether the statistics counter will be cleared after query.
24411 + * @frame_count: the number of frames that have been counted since the
24412 + * counter was cleared last time.
24413 + * @byte_count: the number of bytes in all frames that have been counted.
24414 + *
24415 + * Return zero for success or -EINVAL if query statistics command returns error.
24416 + *
24417 + */
24418 +int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
24419 + u64 *frame_count, u64 *byte_count);
24420 +
24421 +/**
24422 + * qman_ceetm_query_lfqmt - Query the logical frame queue mapping table
24423 + * @lfqid: Logical Frame Queue ID
24424 + * @lfqmt_query: Results of the query command
24425 + *
24426 + * Returns zero for success or -EIO if the query command returns error.
24427 + *
24428 + */
24429 +int qman_ceetm_query_lfqmt(int lfqid,
24430 + struct qm_mcr_ceetm_lfqmt_query *lfqmt_query);
24431 +
24432 +/**
24433 + * qman_ceetm_query_write_statistics - Query (and optionally write) statistics
24434 + * @cid: Target ID (CQID or CCGRID)
24435 + * @dcp_idx: CEETM portal ID
24436 + * @command_type: One of the following:
24437 + * 0 = Query dequeue statistics. CID carries the CQID to be queried.
24438 + * 1 = Query and clear dequeue statistics. CID carries the CQID to be queried
24439 + * 2 = Write dequeue statistics. CID carries the CQID to be written.
24440 + * 3 = Query reject statistics. CID carries the CCGRID to be queried.
24441 + * 4 = Query and clear reject statistics. CID carries the CCGRID to be queried
24442 + * 5 = Write reject statistics. CID carries the CCGRID to be written
24443 + * @frame_count: Frame count value to be written if this is a write command
24444 + * @byte_count: Bytes count value to be written if this is a write command
24445 + *
24446 + * Returns zero for success or -EIO if the query command returns error.
24447 + */
24448 +int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
24449 + u16 command_type, u64 frame_count,
24450 + u64 byte_count);
24451 +
24452 +/**
24453 + * qman_set_wpm - Set waterfall power management
24454 + *
24455 + * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
24456 + *
24457 + * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
24458 + * accessible.
24459 + */
24460 +int qman_set_wpm(int wpm_enable);
24461 +
24462 +/**
24463 + * qman_get_wpm - Query the waterfall power management setting
24464 + *
24465 + * @wpm_enable: return value; 1 = wpm enabled, 0 = wpm disabled.
24466 + *
24467 + * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
24468 + * accessible.
24469 + */
24470 +int qman_get_wpm(int *wpm_enable);
24471 +
24472 +/* The below qman_p_***() variants might be called in a migration situation
24473 + * (e.g. cpu hotplug). They are used to continue accessing the portal that
24474 + * execution was affine to prior to migration.
24475 + * @qman_portal specifies which portal the APIs will use.
24476 +*/
24477 +const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal
24478 + *p);
24479 +int qman_p_irqsource_add(struct qman_portal *p, u32 bits);
24480 +int qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
24481 +int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
24482 +u32 qman_p_poll_slow(struct qman_portal *p);
24483 +void qman_p_poll(struct qman_portal *p);
24484 +void qman_p_stop_dequeues(struct qman_portal *p);
24485 +void qman_p_start_dequeues(struct qman_portal *p);
24486 +void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
24487 +void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools);
24488 +u32 qman_p_static_dequeue_get(struct qman_portal *p);
24489 +void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
24490 + int park_request);
24491 +int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
24492 + u32 flags __maybe_unused, u32 vdqcr);
24493 +int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
24494 + const struct qm_fd *fd, u32 flags);
24495 +int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
24496 + const struct qm_fd *fd, u32 flags,
24497 + struct qman_fq *orp, u16 orp_seqnum);
24498 +int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
24499 + const struct qm_fd *fd, u32 flags,
24500 + qman_cb_precommit cb, void *cb_arg);
24501 +#ifdef __cplusplus
24502 +}
24503 +#endif
24504 +
24505 +#endif /* FSL_QMAN_H */
24506 --- /dev/null
24507 +++ b/include/linux/fsl_usdpaa.h
24508 @@ -0,0 +1,372 @@
24509 +/* Copyright 2011-2012 Freescale Semiconductor, Inc.
24510 + *
24511 + * This file is licensed under the terms of the GNU General Public License
24512 + * version 2. This program is licensed "as is" without any warranty of any
24513 + * kind, whether express or implied.
24514 + */
24515 +
24516 +#ifndef FSL_USDPAA_H
24517 +#define FSL_USDPAA_H
24518 +
24519 +#ifdef __cplusplus
24520 +extern "C" {
24521 +#endif
24522 +
24523 +#include <linux/uaccess.h>
24524 +#include <linux/ioctl.h>
24525 +#include <linux/fsl_qman.h> /* For "enum qm_channel" */
24526 +#include <linux/compat.h>
24527 +
24528 +#ifdef CONFIG_FSL_USDPAA
24529 +
24530 +/******************************/
24531 +/* Allocation of resource IDs */
24532 +/******************************/
24533 +
24534 +/* This enum is used to distinguish between the type of underlying object being
24535 + * manipulated. */
24536 +enum usdpaa_id_type {
24537 + usdpaa_id_fqid,
24538 + usdpaa_id_bpid,
24539 + usdpaa_id_qpool,
24540 + usdpaa_id_cgrid,
24541 + usdpaa_id_ceetm0_lfqid,
24542 + usdpaa_id_ceetm0_channelid,
24543 + usdpaa_id_ceetm1_lfqid,
24544 + usdpaa_id_ceetm1_channelid,
24545 + usdpaa_id_max /* <-- not a valid type, represents the number of types */
24546 +};
24547 +#define USDPAA_IOCTL_MAGIC 'u'
24548 +struct usdpaa_ioctl_id_alloc {
24549 + uint32_t base; /* Return value, the start of the allocated range */
24550 + enum usdpaa_id_type id_type; /* what kind of resource(s) to allocate */
24551 + uint32_t num; /* how many IDs to allocate (and return value) */
24552 + uint32_t align; /* must be a power of 2, 0 is treated like 1 */
24553 + int partial; /* whether to allow less than 'num' */
24554 +};
24555 +struct usdpaa_ioctl_id_release {
24556 +	/* Input: */
24557 + enum usdpaa_id_type id_type;
24558 + uint32_t base;
24559 + uint32_t num;
24560 +};
24561 +struct usdpaa_ioctl_id_reserve {
24562 + enum usdpaa_id_type id_type;
24563 + uint32_t base;
24564 + uint32_t num;
24565 +};
24566 +
24567 +
24568 +/* ioctl() commands */
24569 +#define USDPAA_IOCTL_ID_ALLOC \
24570 + _IOWR(USDPAA_IOCTL_MAGIC, 0x01, struct usdpaa_ioctl_id_alloc)
24571 +#define USDPAA_IOCTL_ID_RELEASE \
24572 + _IOW(USDPAA_IOCTL_MAGIC, 0x02, struct usdpaa_ioctl_id_release)
24573 +#define USDPAA_IOCTL_ID_RESERVE \
24574 + _IOW(USDPAA_IOCTL_MAGIC, 0x0A, struct usdpaa_ioctl_id_reserve)
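From user space these ioctls are issued against the USDPAA character device; a sketch, assuming the device node is exposed at a path such as /dev/fsl-usdpaa (the path and the requested counts are illustrative):

#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fsl_usdpaa.h>

static int example_alloc_fqids(uint32_t *base)
{
	struct usdpaa_ioctl_id_alloc req = {
		.id_type = usdpaa_id_fqid,
		.num = 8,	/* ask for 8 contiguous FQIDs */
		.align = 8,
		.partial = 0,	/* all-or-nothing */
	};
	int fd = open("/dev/fsl-usdpaa", O_RDWR);	/* assumed device path */

	if (fd < 0 || ioctl(fd, USDPAA_IOCTL_ID_ALLOC, &req))
		return -1;
	*base = req.base;	/* req.base .. req.base + req.num - 1 are ours */
	return fd;
}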
24575 +
24576 +/**********************/
24577 +/* Mapping DMA memory */
24578 +/**********************/
24579 +
24580 +/* Maximum length for a map name, including NULL-terminator */
24581 +#define USDPAA_DMA_NAME_MAX 16
24582 +/* Flags for requesting DMA maps. Maps are private+unnamed or sharable+named.
24583 + * For a sharable and named map, specify _SHARED (whether creating one or
24584 + * binding to an existing one). If _SHARED is specified and _CREATE is not, then
24585 + * the mapping must already exist. If _SHARED and _CREATE are specified and the
24586 + * mapping doesn't already exist, it will be created. If _SHARED and _CREATE are
24587 + * specified and the mapping already exists, the mapping will fail unless _LAZY
24588 + * is specified. When mapping to a pre-existing sharable map, the length must be
24589 + * an exact match. Lengths must be a power-of-4 multiple of page size.
24590 + *
24591 + * Note that this does not actually map the memory to user-space, that is done
24592 + * by a subsequent mmap() using the page offset returned from this ioctl(). The
24593 + * ioctl() is what gives the process permission to do this, and a page-offset
24594 + * with which to do so.
24595 + */
24596 +#define USDPAA_DMA_FLAG_SHARE 0x01
24597 +#define USDPAA_DMA_FLAG_CREATE 0x02
24598 +#define USDPAA_DMA_FLAG_LAZY 0x04
24599 +#define USDPAA_DMA_FLAG_RDONLY 0x08
24600 +struct usdpaa_ioctl_dma_map {
24601 + /* Output parameters - virtual and physical addresses */
24602 + void *ptr;
24603 + uint64_t phys_addr;
24604 + /* Input parameter, the length of the region to be created (or if
24605 + * mapping an existing region, this must match it). Must be a power-of-4
24606 + * multiple of page size. */
24607 + uint64_t len;
24608 + /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
24609 + uint32_t flags;
24610 + /* If _FLAG_SHARE is specified, the name of the region to be created (or
24611 + * of the existing mapping to use). */
24612 + char name[USDPAA_DMA_NAME_MAX];
24613 + /* If this ioctl() creates the mapping, this is an input parameter
24614 + * stating whether the region supports locking. If mapping an existing
24615 + * region, this is a return value indicating the same thing. */
24616 + int has_locking;
24617 + /* In the case of a successful map with _CREATE and _LAZY, this return
24618 + * value indicates whether we created the mapped region or whether it
24619 + * already existed. */
24620 + int did_create;
24621 +};
24622 +
24623 +#ifdef CONFIG_COMPAT
24624 +struct usdpaa_ioctl_dma_map_compat {
24625 + /* Output parameters - virtual and physical addresses */
24626 + compat_uptr_t ptr;
24627 + uint64_t phys_addr;
24628 + /* Input parameter, the length of the region to be created (or if
24629 + * mapping an existing region, this must match it). Must be a power-of-4
24630 + * multiple of page size. */
24631 + uint64_t len;
24632 + /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
24633 + uint32_t flags;
24634 + /* If _FLAG_SHARE is specified, the name of the region to be created (or
24635 + * of the existing mapping to use). */
24636 + char name[USDPAA_DMA_NAME_MAX];
24637 + /* If this ioctl() creates the mapping, this is an input parameter
24638 + * stating whether the region supports locking. If mapping an existing
24639 + * region, this is a return value indicating the same thing. */
24640 + int has_locking;
24641 + /* In the case of a successful map with _CREATE and _LAZY, this return
24642 + * value indicates whether we created the mapped region or whether it
24643 + * already existed. */
24644 + int did_create;
24645 +};
24646 +
24647 +#define USDPAA_IOCTL_DMA_MAP_COMPAT \
24648 + _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map_compat)
24649 +#endif
24650 +
24651 +
24652 +#define USDPAA_IOCTL_DMA_MAP \
24653 + _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map)
24654 +/* munmap() does not remove the DMA map, just the user-space mapping to it.
24655 + * This ioctl will do both (though you can munmap() before calling the ioctl
24656 + * too). */
24657 +#define USDPAA_IOCTL_DMA_UNMAP \
24658 + _IOW(USDPAA_IOCTL_MAGIC, 0x04, unsigned char)
24659 +/* We implement a cross-process locking scheme per DMA map. Call this ioctl()
24660 + * with a mmap()'d address, and the process will (interruptible) sleep if the
24661 + * lock is already held by another process. Process destruction will
24662 + * automatically clean up any held locks. */
24663 +#define USDPAA_IOCTL_DMA_LOCK \
24664 + _IOW(USDPAA_IOCTL_MAGIC, 0x05, unsigned char)
24665 +#define USDPAA_IOCTL_DMA_UNLOCK \
24666 + _IOW(USDPAA_IOCTL_MAGIC, 0x06, unsigned char)
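A sketch of creating (or binding to) a named, shared DMA region, reusing an already-open USDPAA file descriptor from the sketch above; the name and length are placeholders, and the follow-up mmap()/lock calls are only indicated in the comments:

static int example_dma_map(int fd)
{
	struct usdpaa_ioctl_dma_map map = {
		.len = 4 * 1024 * 1024,	/* power-of-4 multiple of page size */
		.flags = USDPAA_DMA_FLAG_SHARE | USDPAA_DMA_FLAG_CREATE |
			 USDPAA_DMA_FLAG_LAZY,
		.name = "example_shm",	/* placeholder map name */
	};

	if (ioctl(fd, USDPAA_IOCTL_DMA_MAP, &map))
		return -1;
	/* map.phys_addr holds the physical base; a subsequent mmap() on fd
	 * (per the page-offset convention described above) provides the
	 * user-space view, and USDPAA_IOCTL_DMA_LOCK/UNLOCK can serialise
	 * access between processes sharing the region. */
	return 0;
}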
24667 +
24668 +/***************************************/
24669 +/* Mapping and using QMan/BMan portals */
24670 +/***************************************/
24671 +enum usdpaa_portal_type {
24672 + usdpaa_portal_qman,
24673 + usdpaa_portal_bman,
24674 +};
24675 +
24676 +#define QBMAN_ANY_PORTAL_IDX 0xffffffff
24677 +
24678 +struct usdpaa_ioctl_portal_map {
24679 +	/* Input parameter: whether a qman or bman portal is required. */
24680 +
24681 + enum usdpaa_portal_type type;
24682 +	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
24683 + for don't care. The portal index will be populated by the
24684 + driver when the ioctl() successfully completes */
24685 + uint32_t index;
24686 +
24687 + /* Return value if the map succeeds, this gives the mapped
24688 + * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
24689 + struct usdpaa_portal_map {
24690 + void *cinh;
24691 + void *cena;
24692 + } addr;
24693 + /* Qman-specific return values */
24694 + uint16_t channel;
24695 + uint32_t pools;
24696 +};
24697 +
24698 +#ifdef CONFIG_COMPAT
24699 +struct compat_usdpaa_ioctl_portal_map {
24700 +	/* Input parameter: whether a qman or bman portal is required. */
24701 + enum usdpaa_portal_type type;
24702 +	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
24703 + for don't care. The portal index will be populated by the
24704 + driver when the ioctl() successfully completes */
24705 + uint32_t index;
24706 + /* Return value if the map succeeds, this gives the mapped
24707 + * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
24708 + struct usdpaa_portal_map_compat {
24709 + compat_uptr_t cinh;
24710 + compat_uptr_t cena;
24711 + } addr;
24712 + /* Qman-specific return values */
24713 + uint16_t channel;
24714 + uint32_t pools;
24715 +};
24716 +#define USDPAA_IOCTL_PORTAL_MAP_COMPAT \
24717 + _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct compat_usdpaa_ioctl_portal_map)
24718 +#define USDPAA_IOCTL_PORTAL_UNMAP_COMPAT \
24719 + _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map_compat)
24720 +#endif
24721 +
24722 +#define USDPAA_IOCTL_PORTAL_MAP \
24723 + _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct usdpaa_ioctl_portal_map)
24724 +#define USDPAA_IOCTL_PORTAL_UNMAP \
24725 + _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map)
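A sketch of mapping any free QMan portal through the same file descriptor; the returned addresses, channel and pool mask describe the portal to be used by the user-space driver:

static int example_map_qman_portal(int fd,
				   struct usdpaa_ioctl_portal_map *pmap)
{
	pmap->type = usdpaa_portal_qman;
	pmap->index = QBMAN_ANY_PORTAL_IDX;	/* let the driver choose */
	if (ioctl(fd, USDPAA_IOCTL_PORTAL_MAP, pmap))
		return -1;
	/* pmap->addr.cinh / pmap->addr.cena, pmap->channel and pmap->pools
	 * are now valid for this portal */
	return 0;
}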
24726 +
24727 +struct usdpaa_ioctl_irq_map {
24728 + enum usdpaa_portal_type type; /* Type of portal to map */
24729 + int fd; /* File descriptor that contains the portal */
24730 + void *portal_cinh; /* Cache inhibited area to identify the portal */
24731 +};
24732 +
24733 +#define USDPAA_IOCTL_PORTAL_IRQ_MAP \
24734 + _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct usdpaa_ioctl_irq_map)
24735 +
24736 +#ifdef CONFIG_COMPAT
24737 +
24738 +struct compat_ioctl_irq_map {
24739 + enum usdpaa_portal_type type; /* Type of portal to map */
24740 + compat_int_t fd; /* File descriptor that contains the portal */
24741 +	compat_uptr_t portal_cinh; /* Used to identify the portal */};
24742 +
24743 +#define USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT \
24744 + _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct compat_ioctl_irq_map)
24745 +#endif
24746 +
24747 +/* ioctl to query the amount of DMA memory used in the system */
24748 +struct usdpaa_ioctl_dma_used {
24749 + uint64_t free_bytes;
24750 + uint64_t total_bytes;
24751 +};
24752 +#define USDPAA_IOCTL_DMA_USED \
24753 + _IOR(USDPAA_IOCTL_MAGIC, 0x0B, struct usdpaa_ioctl_dma_used)
24754 +
24755 +/* ioctl to allocate a raw portal */
24756 +struct usdpaa_ioctl_raw_portal {
24757 + /* inputs */
24758 + enum usdpaa_portal_type type; /* Type of portal to allocate */
24759 +
24760 + /* set to non zero to turn on stashing */
24761 + uint8_t enable_stash;
24762 + /* Stashing attributes for the portal */
24763 + uint32_t cpu;
24764 + uint32_t cache;
24765 + uint32_t window;
24766 +
24767 + /* Specifies the stash request queue this portal should use */
24768 + uint8_t sdest;
24769 +
24770 +	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
24771 + * for don't care. The portal index will be populated by the
24772 + * driver when the ioctl() successfully completes */
24773 + uint32_t index;
24774 +
24775 + /* outputs */
24776 + uint64_t cinh;
24777 + uint64_t cena;
24778 +};
24779 +
24780 +#define USDPAA_IOCTL_ALLOC_RAW_PORTAL \
24781 + _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct usdpaa_ioctl_raw_portal)
24782 +
24783 +#define USDPAA_IOCTL_FREE_RAW_PORTAL \
24784 + _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct usdpaa_ioctl_raw_portal)
24785 +
24786 +#ifdef CONFIG_COMPAT
24787 +
24788 +struct compat_ioctl_raw_portal {
24789 + /* inputs */
24790 + enum usdpaa_portal_type type; /* Type of portal to allocate */
24791 +
24792 + /* set to non zero to turn on stashing */
24793 + uint8_t enable_stash;
24794 + /* Stashing attributes for the portal */
24795 + uint32_t cpu;
24796 + uint32_t cache;
24797 + uint32_t window;
24798 + /* Specifies the stash request queue this portal should use */
24799 + uint8_t sdest;
24800 +
24801 +	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
24802 + * for don't care. The portal index will be populated by the
24803 + * driver when the ioctl() successfully completes */
24804 + uint32_t index;
24805 +
24806 + /* outputs */
24807 + uint64_t cinh;
24808 + uint64_t cena;
24809 +};
24810 +
24811 +#define USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT \
24812 + _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct compat_ioctl_raw_portal)
24813 +
24814 +#define USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT \
24815 + _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct compat_ioctl_raw_portal)
24816 +
24817 +#endif
24818 +
24819 +#ifdef __KERNEL__
24820 +
24821 +/* Early-boot hook */
24822 +int __init fsl_usdpaa_init_early(void);
24823 +
24824 +/* Fault-handling in arch/powerpc/mm/mem.c gives USDPAA an opportunity to detect
24825 + * faults within its ranges via this hook. */
24826 +int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size);
24827 +
24828 +#endif /* __KERNEL__ */
24829 +
24830 +#endif /* CONFIG_FSL_USDPAA */
24831 +
24832 +#ifdef __KERNEL__
24833 +/* This interface is needed in a few places and though it's not specific to
24834 + * USDPAA as such, creating a new header for it doesn't make any sense. The
24835 + * qbman kernel driver implements this interface and uses it as the backend for
24836 + * both the FQID and BPID allocators. The fsl_usdpaa driver also uses this
24837 + * interface for tracking per-process allocations handed out to user-space. */
24838 +struct dpa_alloc {
24839 + struct list_head free;
24840 + spinlock_t lock;
24841 + struct list_head used;
24842 +};
24843 +#define DECLARE_DPA_ALLOC(name) \
24844 + struct dpa_alloc name = { \
24845 + .free = { \
24846 + .prev = &name.free, \
24847 + .next = &name.free \
24848 + }, \
24849 + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
24850 + .used = { \
24851 + .prev = &name.used, \
24852 + .next = &name.used \
24853 + } \
24854 + }
24855 +static inline void dpa_alloc_init(struct dpa_alloc *alloc)
24856 +{
24857 + INIT_LIST_HEAD(&alloc->free);
24858 + INIT_LIST_HEAD(&alloc->used);
24859 + spin_lock_init(&alloc->lock);
24860 +}
24861 +int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
24862 + int partial);
24863 +void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count);
24864 +void dpa_alloc_seed(struct dpa_alloc *alloc, u32 fqid, u32 count);
24865 +
24866 +/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
24867 + * desired range is not available, or 0 for success. */
24868 +int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base_id, u32 count);
24869 +/* Pops and returns contiguous ranges from the allocator. Returns -ENOMEM when
24870 + * 'alloc' is empty. */
24871 +int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count);
24872 +/* Returns 1 if the specified id is alloced, 0 otherwise */
24873 +int dpa_alloc_check(struct dpa_alloc *list, u32 id);
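A sketch of how a kernel user might drive this allocator interface directly; the seeded id range is arbitrary, and dpa_alloc_new() is assumed to follow the same convention as the qman/bman *_range allocators above (returning the number of ids obtained on success):

static DECLARE_DPA_ALLOC(example_alloc);	/* placeholder allocator */

static int example_dpa_alloc_usage(void)
{
	u32 id;

	/* make ids 0x100..0x10f available for allocation */
	dpa_alloc_seed(&example_alloc, 0x100, 16);
	if (dpa_alloc_new(&example_alloc, &id, 1, 0, 0) < 1)
		return -ENOMEM;
	/* ... use 'id' ... */
	dpa_alloc_free(&example_alloc, id, 1);
	return 0;
}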
24874 +#endif /* __KERNEL__ */
24875 +
24876 +#ifdef __cplusplus
24877 +}
24878 +#endif
24879 +
24880 +#endif /* FSL_USDPAA_H */