openwrt/staging/dedeckeh.git: target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
1 From 3a302437605308079db398b67000a77a4fe92da8 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 12:07:58 +0800
4 Subject: [PATCH] dpaa2: support layerscape
5
6 This is an integrated patch for Layerscape DPAA2 support.
7
8 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
9 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
10 Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
11 Signed-off-by: costi <constantin.tudor@freescale.com>
12 Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
13 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
14 ---
15 drivers/soc/fsl/ls2-console/Kconfig | 4 +
16 drivers/soc/fsl/ls2-console/Makefile | 1 +
17 drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
18 drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 +
19 drivers/staging/fsl-dpaa2/ethernet/README | 186 ++
20 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 350 +++
21 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
22 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 ++
23 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3155 ++++++++++++++++++++
24 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 460 +++
25 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 856 ++++++
26 drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 ++
27 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 600 ++++
28 drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1770 +++++++++++
29 drivers/staging/fsl-dpaa2/ethernet/dpni.h | 989 ++++++
30 drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
31 drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 +
32 drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
33 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 ++++++
34 drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++++
35 drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 ++++++++
36 drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 ++++++++++++
37 drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
38 drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
39 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
40 drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++
41 drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++
42 drivers/staging/fsl-dpaa2/evb/evb.c | 1350 +++++++++
43 drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
44 drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
45 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 ++
46 drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++
47 drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 +++
48 drivers/staging/fsl-dpaa2/mac/mac.c | 666 +++++
49 drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
50 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
51 drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++
52 drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 ++
53 drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++
54 39 files changed, 22696 insertions(+)
55 create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
56 create mode 100644 drivers/soc/fsl/ls2-console/Makefile
57 create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
58 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
59 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README
60 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
61 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
62 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
63 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
64 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
65 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
66 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h
67 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
68 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
69 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
70 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h
71 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig
72 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
73 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
74 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
75 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
76 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c
77 create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
78 create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
79 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
80 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c
81 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h
82 create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c
83 create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
84 create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
85 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
86 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
87 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
88 create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
89 create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile
90 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
91 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c
92 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h
93 create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c
94
95 diff --git a/drivers/soc/fsl/ls2-console/Kconfig b/drivers/soc/fsl/ls2-console/Kconfig
96 new file mode 100644
97 index 00000000..47d0dc11
98 --- /dev/null
99 +++ b/drivers/soc/fsl/ls2-console/Kconfig
100 @@ -0,0 +1,4 @@
101 +config FSL_LS2_CONSOLE
102 + tristate "Layerscape MC and AIOP console support"
103 + depends on ARCH_LAYERSCAPE
104 + default y
105 diff --git a/drivers/soc/fsl/ls2-console/Makefile b/drivers/soc/fsl/ls2-console/Makefile
106 new file mode 100644
107 index 00000000..62b96346
108 --- /dev/null
109 +++ b/drivers/soc/fsl/ls2-console/Makefile
110 @@ -0,0 +1 @@
111 +obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o
112 diff --git a/drivers/soc/fsl/ls2-console/ls2-console.c b/drivers/soc/fsl/ls2-console/ls2-console.c
113 new file mode 100644
114 index 00000000..68415ad0
115 --- /dev/null
116 +++ b/drivers/soc/fsl/ls2-console/ls2-console.c
117 @@ -0,0 +1,284 @@
118 +/* Copyright 2015-2016 Freescale Semiconductor Inc.
119 + *
120 + * Redistribution and use in source and binary forms, with or without
121 + * modification, are permitted provided that the following conditions are met:
122 + * * Redistributions of source code must retain the above copyright
123 + * notice, this list of conditions and the following disclaimer.
124 + * * Redistributions in binary form must reproduce the above copyright
125 + * notice, this list of conditions and the following disclaimer in the
126 + * documentation and/or other materials provided with the distribution.
127 + * * Neither the name of the above-listed copyright holders nor the
128 + * names of any contributors may be used to endorse or promote products
129 + * derived from this software without specific prior written permission.
130 + *
131 + *
132 + * ALTERNATIVELY, this software may be distributed under the terms of the
133 + * GNU General Public License ("GPL") as published by the Free Software
134 + * Foundation, either version 2 of that License or (at your option) any
135 + * later version.
136 + *
137 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
138 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
139 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
140 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
141 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
142 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
143 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
144 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
145 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
146 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
147 + * POSSIBILITY OF SUCH DAMAGE.
148 + */
149 +
150 +#include <linux/miscdevice.h>
151 +#include <linux/uaccess.h>
152 +#include <linux/poll.h>
153 +#include <linux/compat.h>
154 +#include <linux/module.h>
155 +#include <linux/slab.h>
156 +#include <linux/io.h>
157 +
158 +/* SoC address for the MC firmware base low/high registers */
159 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020
160 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2
161 +/* MC firmware base low/high registers indexes */
162 +#define MCFBALR_OFFSET 0
163 +#define MCFBAHR_OFFSET 1
164 +
165 +/* Bit mask used to obtain the most significant part of the MC base address */
166 +#define MC_FW_HIGH_ADDR_MASK 0x1FFFF
167 +/* Bit mask used to obtain the least significant part of the MC base address */
168 +#define MC_FW_LOW_ADDR_MASK 0xE0000000
169 +
170 +#define MC_BUFFER_OFFSET 0x01000000
171 +#define MC_BUFFER_SIZE (1024*1024*16)
172 +#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET)
173 +
174 +#define AIOP_BUFFER_OFFSET 0x06000000
175 +#define AIOP_BUFFER_SIZE (1024*1024*16)
176 +#define AIOP_OFFSET_DELTA (0)
177 +
178 +struct log_header {
179 + char magic_word[8]; /* magic word */
180 + uint32_t buf_start; /* holds the 32-bit little-endian
181 + * offset of the start of the buffer
182 + */
183 + uint32_t buf_length; /* holds the 32-bit little-endian
184 + * length of the buffer
185 + */
186 + uint32_t last_byte; /* holds the 32-bit little-endian offset
187 + * of the byte after the last byte that
188 + * was written
189 + */
190 + char reserved[44];
191 +};
192 +
193 +#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
194 +#define LOG_VERSION_MAJOR 1
195 +#define LOG_VERSION_MINOR 0
196 +
197 +
198 +#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); }
199 +
200 +struct console_data {
201 + char *map_addr;
202 + struct log_header *hdr;
203 + char *start_addr; /* Start of buffer */
204 + char *end_addr; /* End of buffer */
205 + char *end_of_data; /* Current end of data */
206 + char *cur_ptr; /* Last data sent to console */
207 +};
208 +
209 +#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
210 +
211 +static inline void __adjust_end(struct console_data *cd)
212 +{
213 + cd->end_of_data = cd->start_addr
214 + + LAST_BYTE(le32_to_cpu(cd->hdr->last_byte));
215 +}
216 +
217 +static inline void adjust_end(struct console_data *cd)
218 +{
219 + invalidate(cd->hdr);
220 + __adjust_end(cd);
221 +}
222 +
223 +static inline uint64_t get_mc_fw_base_address(void)
224 +{
225 + u32 *mcfbaregs = (u32 *) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS,
226 + SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE);
227 + u64 mcfwbase = 0ULL;
228 +
229 + mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK;
230 + mcfwbase <<= 32;
231 + mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK;
232 + iounmap(mcfbaregs);
233 + pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase);
234 + return mcfwbase;
235 +}
236 +
237 +static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp,
238 + u64 offset, u64 size,
239 + uint8_t *emagic, uint8_t magic_len,
240 + u32 offset_delta)
241 +{
242 + struct console_data *cd;
243 + uint8_t *magic;
244 + uint32_t wrapped;
245 +
246 + cd = kmalloc(sizeof(*cd), GFP_KERNEL);
247 + if (cd == NULL)
248 + return -ENOMEM;
249 + fp->private_data = cd;
250 + cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size);
251 +
252 + cd->hdr = (struct log_header *) cd->map_addr;
253 + invalidate(cd->hdr);
254 +
255 + magic = cd->hdr->magic_word;
256 + if (memcmp(magic, emagic, magic_len)) {
257 + pr_info("magic didn't match!\n");
258 + pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n",
259 + emagic[0], emagic[1], emagic[2], emagic[3],
260 + emagic[4], emagic[5], emagic[6], emagic[7]);
261 + pr_info(" seen: %02x %02x %02x %02x %02x %02x %02x %02x\n",
262 + magic[0], magic[1], magic[2], magic[3],
263 + magic[4], magic[5], magic[6], magic[7]);
264 + iounmap(cd->map_addr);
265 + kfree(cd);
266 + return -EIO;
267 + }
268 +
269 + cd->start_addr = cd->map_addr
270 + + le32_to_cpu(cd->hdr->buf_start) - offset_delta;
271 + cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length);
272 +
273 + wrapped = le32_to_cpu(cd->hdr->last_byte)
274 + & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
275 +
276 + __adjust_end(cd);
277 + if (wrapped && (cd->end_of_data != cd->end_addr))
278 + cd->cur_ptr = cd->end_of_data+1;
279 + else
280 + cd->cur_ptr = cd->start_addr;
281 +
282 + return 0;
283 +}
284 +
285 +static int fsl_ls2_mc_console_open(struct inode *node, struct file *fp)
286 +{
287 + uint8_t magic_word[] = { 0, 1, 'C', 'M' };
288 +
289 + return fsl_ls2_generic_console_open(node, fp,
290 + MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
291 + magic_word, sizeof(magic_word),
292 + MC_OFFSET_DELTA);
293 +}
294 +
295 +static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp)
296 +{
297 + uint8_t magic_word[] = { 'P', 'O', 'I', 'A' };
298 +
299 + return fsl_ls2_generic_console_open(node, fp,
300 + AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
301 + magic_word, sizeof(magic_word),
302 + AIOP_OFFSET_DELTA);
303 +}
304 +
305 +static int fsl_ls2_console_close(struct inode *node, struct file *fp)
306 +{
307 + struct console_data *cd = fp->private_data;
308 +
309 + iounmap(cd->map_addr);
310 + kfree(cd);
311 + return 0;
312 +}
313 +
314 +ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf, size_t count,
315 + loff_t *f_pos)
316 +{
317 + struct console_data *cd = fp->private_data;
318 + size_t bytes = 0;
319 + char data;
320 +
321 + /* Check if we need to adjust the end of data addr */
322 + adjust_end(cd);
323 +
324 + while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) {
325 + if (((u64)cd->cur_ptr) % 64 == 0)
326 + invalidate(cd->cur_ptr);
327 +
328 + data = *(cd->cur_ptr);
329 + if (copy_to_user(&buf[bytes], &data, 1))
330 + return -EFAULT;
331 + cd->cur_ptr++;
332 + if (cd->cur_ptr >= cd->end_addr)
333 + cd->cur_ptr = cd->start_addr;
334 + ++bytes;
335 + }
336 + return bytes;
337 +}
338 +
339 +static const struct file_operations fsl_ls2_mc_console_fops = {
340 + .owner = THIS_MODULE,
341 + .open = fsl_ls2_mc_console_open,
342 + .release = fsl_ls2_console_close,
343 + .read = fsl_ls2_console_read,
344 +};
345 +
346 +static struct miscdevice fsl_ls2_mc_console_dev = {
347 + .minor = MISC_DYNAMIC_MINOR,
348 + .name = "fsl_mc_console",
349 + .fops = &fsl_ls2_mc_console_fops
350 +};
351 +
352 +static const struct file_operations fsl_ls2_aiop_console_fops = {
353 + .owner = THIS_MODULE,
354 + .open = fsl_ls2_aiop_console_open,
355 + .release = fsl_ls2_console_close,
356 + .read = fsl_ls2_console_read,
357 +};
358 +
359 +static struct miscdevice fsl_ls2_aiop_console_dev = {
360 + .minor = MISC_DYNAMIC_MINOR,
361 + .name = "fsl_aiop_console",
362 + .fops = &fsl_ls2_aiop_console_fops
363 +};
364 +
365 +static int __init fsl_ls2_console_init(void)
366 +{
367 + int err = 0;
368 +
369 + pr_info("Freescale LS2 console driver\n");
370 + err = misc_register(&fsl_ls2_mc_console_dev);
371 + if (err) {
372 + pr_err("fsl_mc_console: cannot register device\n");
373 + return err;
374 + }
375 + pr_info("fsl-ls2-console: device %s registered\n",
376 + fsl_ls2_mc_console_dev.name);
377 +
378 + err = misc_register(&fsl_ls2_aiop_console_dev);
379 + if (err) {
380 + pr_err("fsl_aiop_console: cannot register device\n");
381 + return err;
382 + }
383 + pr_info("fsl-ls2-console: device %s registered\n",
384 + fsl_ls2_aiop_console_dev.name);
385 +
386 + return 0;
387 +}
388 +
389 +static void __exit fsl_ls2_console_exit(void)
390 +{
391 + misc_deregister(&fsl_ls2_mc_console_dev);
392 +
393 + misc_deregister(&fsl_ls2_aiop_console_dev);
394 +}
395 +
396 +module_init(fsl_ls2_console_init);
397 +module_exit(fsl_ls2_console_exit);
398 +
399 +MODULE_AUTHOR("Roy Pledge <roy.pledge@freescale.com>");
400 +MODULE_LICENSE("Dual BSD/GPL");
401 +MODULE_DESCRIPTION("Freescale LS2 console driver");
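
A usage note, not part of the patch: the two misc devices registered above expose the MC and AIOP log ring buffers read-only, so consuming them only takes a plain read loop. A minimal userspace sketch (assuming devtmpfs creates /dev/fsl_mc_console from the .name field registered above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/dev/fsl_mc_console", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/fsl_mc_console");
                    return 1;
            }
            /* fsl_ls2_console_read() returns 0 once the current end of the
             * log buffer is reached, which terminates the loop.
             */
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, (size_t)n, stdout);
            close(fd);
            return 0;
    }
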
402 diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile
403 new file mode 100644
404 index 00000000..e26911d5
405 --- /dev/null
406 +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
407 @@ -0,0 +1,11 @@
408 +#
409 +# Makefile for the Freescale DPAA2 Ethernet controller
410 +#
411 +
412 +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
413 +
414 +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
415 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
416 +
417 +# Needed by the tracing framework
418 +CFLAGS_dpaa2-eth.o := -I$(src)
419 diff --git a/drivers/staging/fsl-dpaa2/ethernet/README b/drivers/staging/fsl-dpaa2/ethernet/README
420 new file mode 100644
421 index 00000000..410952ec
422 --- /dev/null
423 +++ b/drivers/staging/fsl-dpaa2/ethernet/README
424 @@ -0,0 +1,186 @@
425 +Freescale DPAA2 Ethernet driver
426 +===============================
427 +
428 +This file provides documentation for the Freescale DPAA2 Ethernet driver.
429 +
430 +
431 +Contents
432 +========
433 + Supported Platforms
434 + Architecture Overview
435 + Creating a Network Interface
436 + Features & Offloads
437 +
438 +
439 +Supported Platforms
440 +===================
441 +This driver provides networking support for Freescale DPAA2 SoCs, e.g.
442 +LS2080A, LS2088A, LS1088A.
443 +
444 +
445 +Architecture Overview
446 +=====================
447 +Unlike regular NICs, the DPAA2 architecture has no single hardware block
448 +representing a network interface; instead, several separate hardware resources
449 +work together to provide the networking functionality:
450 + - network interfaces
451 + - queues, channels
452 + - buffer pools
453 + - MAC/PHY
454 +
455 +All hardware resources are allocated and configured through the Management
456 +Complex (MC) portals. MC abstracts most of these resources as DPAA2 objects
457 +and exposes ABIs through which they can be configured and controlled. A few
458 +hardware resources, like queues, do not have a corresponding MC object and
459 +are treated as internal resources of other objects.
460 +
461 +For a more detailed description of the DPAA2 architecture and its object
462 +abstractions see:
463 + drivers/staging/fsl-mc/README.txt
464 +
465 +Each Linux net device is built on top of a Datapath Network Interface (DPNI)
466 +object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
467 +(DPCONs).
468 +
469 +Configuration interface:
470 +
471 + -----------------------
472 + | DPAA2 Ethernet Driver |
473 + -----------------------
474 + . . .
475 + . . .
476 + . . . . . . . . . . . .
477 + . . .
478 + . . .
479 + ---------- ---------- -----------
480 + | DPBP API | | DPNI API | | DPCON API |
481 + ---------- ---------- -----------
482 + . . . software
483 +=========== . ========== . ============ . ===================
484 + . . . hardware
485 + ------------------------------------------
486 + | MC hardware portals |
487 + ------------------------------------------
488 + . . .
489 + . . .
490 + ------ ------ -------
491 + | DPBP | | DPNI | | DPCON |
492 + ------ ------ -------
493 +
494 +The DPNIs are network interfaces without a direct one-to-one mapping to PHYs.
495 +DPBPs represent hardware buffer pools. Packet I/O is performed in the context
496 +of DPCON objects, using DPIO portals for managing and communicating with the
497 +hardware resources.
498 +
499 +Datapath (I/O) interface:
500 +
501 + -----------------------------------------------
502 + | DPAA2 Ethernet Driver |
503 + -----------------------------------------------
504 + | ^ ^ | |
505 + | | | | |
506 + enqueue| dequeue| data | dequeue| seed |
507 + (Tx) | (Rx, TxC)| avail.| request| buffers|
508 + | | notify| | |
509 + | | | | |
510 + V | | V V
511 + -----------------------------------------------
512 + | DPIO Driver |
513 + -----------------------------------------------
514 + | | | | | software
515 + | | | | | ================
516 + | | | | | hardware
517 + -----------------------------------------------
518 + | I/O hardware portals |
519 + -----------------------------------------------
520 + | ^ ^ | |
521 + | | | | |
522 + | | | V |
523 + V | ================ V
524 + ---------------------- | -------------
525 + queues ---------------------- | | Buffer pool |
526 + ---------------------- | -------------
527 + =======================
528 + Channel
529 +
530 +Datapath I/O (DPIO) portals provide enqueue and dequeue services, data
531 +availability notifications and buffer pool management. DPIOs are shared between
532 +all DPAA2 objects (and implicitly all DPAA2 kernel drivers) that work with data
533 +frames, but must be affine to the CPUs for the purpose of traffic distribution.
534 +
535 +Frames are transmitted and received through hardware frame queues, which can be
536 +grouped in channels for the purpose of hardware scheduling. The Ethernet driver
537 +enqueues TX frames on egress queues and, after transmission is complete, a TX
538 +confirmation frame is sent back to the CPU.
539 +
540 +When frames are available on ingress queues, a data availability notification
541 +is sent to the CPU; notifications are raised per channel, so even if multiple
542 +queues in the same channel have available frames, only one notification is sent.
543 +After a channel fires a notification, it must be explicitly rearmed.
544 +
545 +Each network interface can have multiple Rx, Tx and confirmation queues affined
546 +to CPUs, and one channel (DPCON) for each CPU that services at least one queue.
547 +DPCONs are used to distribute ingress traffic to different CPUs via the cores'
548 +affine DPIOs.
549 +
550 +The role of the hardware buffer pools is to store ingress frame data. Each
551 +network interface has a privately owned buffer pool, which it seeds with
552 +kernel-allocated buffers.
553 +
554 +
555 +DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a DPMAC
556 +object or to another DPNI through an internal link, but the connection is
557 +managed by MC and completely transparent to the Ethernet driver.
558 +
559 + --------- --------- ---------
560 + | eth if1 | | eth if2 | | eth ifn |
561 + --------- --------- ---------
562 + . . .
563 + . . .
564 + . . .
565 + ---------------------------
566 + | DPAA2 Ethernet Driver |
567 + ---------------------------
568 + . . .
569 + . . .
570 + . . .
571 + ------ ------ ------ -------
572 + | DPNI | | DPNI | | DPNI | | DPMAC |----+
573 + ------ ------ ------ ------- |
574 + | | | | |
575 + | | | | -----
576 + =========== ================== | PHY |
577 + -----
578 +
579 +Creating a Network Interface
580 +============================
581 +A net device is created for each DPNI object probed on the MC bus. Each DPNI has
582 +a number of properties which determine the network interface configuration
583 +options and associated hardware resources.
584 +
585 +DPNI objects (and the other DPAA2 objects needed for a network interface) can be
586 +added to a container on the MC bus in one of two ways: statically, through a
587 +Datapath Layout Binary file (DPL) that is parsed by MC at boot time; or created
588 +dynamically at runtime, via the DPAA2 objects APIs.
589 +
590 +
591 +Features & Offloads
592 +===================
593 +Hardware checksum offloading is supported for TCP and UDP over IPv4/6 frames.
594 +The checksum offloads can be independently configured on RX and TX through
595 +ethtool.
596 +
597 +Hardware offload of unicast and multicast MAC filtering is supported on the
598 +ingress path and permanently enabled.
599 +
600 +Scatter-gather frames are supported on both RX and TX paths. On TX, SG support
601 +is configurable via ethtool; on RX it is always enabled.
602 +
603 +The DPAA2 hardware can process jumbo Ethernet frames of up to 10K bytes.
604 +
605 +The Ethernet driver defines a static flow hashing scheme that distributes
606 +traffic based on a 5-tuple key: src IP, dst IP, IP proto, L4 src port,
607 +L4 dst port. No user configuration is supported for now.
608 +
609 +Hardware-specific statistics for the network interface, as well as some
610 +non-standard driver stats, can be consulted through the ethtool -S option.
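
As a rough illustration of the configuration interface described above (a sketch only, not the driver's actual probe path, which also sets up buffer pools, channels, queues and NAPI), taking one DPNI under driver control comes down to a pair of MC commands using the dpni_open()/dpni_enable() helpers added by this patch in dpni.h. Here mc_io is assumed to be an already-initialized MC portal and dpni_id the object id found on the fsl-mc bus:

    /* Minimal control-path sketch: bind a command token to the DPNI
     * object and enable Rx/Tx. Error handling and the rest of the
     * configuration (buffer layout, Rx/Tx queues, ...) are omitted.
     */
    static int example_dpni_bringup(struct fsl_mc_io *mc_io, int dpni_id,
                                    u16 *token)
    {
            int err;

            err = dpni_open(mc_io, 0, dpni_id, token);
            if (err)
                    return err;

            /* Subsequent dpni_*() calls reuse the same token */
            return dpni_enable(mc_io, 0, *token);
    }
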
611 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
612 new file mode 100644
613 index 00000000..445c5d17
614 --- /dev/null
615 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
616 @@ -0,0 +1,350 @@
617 +
618 +/* Copyright 2015 Freescale Semiconductor Inc.
619 + *
620 + * Redistribution and use in source and binary forms, with or without
621 + * modification, are permitted provided that the following conditions are met:
622 + * * Redistributions of source code must retain the above copyright
623 + * notice, this list of conditions and the following disclaimer.
624 + * * Redistributions in binary form must reproduce the above copyright
625 + * notice, this list of conditions and the following disclaimer in the
626 + * documentation and/or other materials provided with the distribution.
627 + * * Neither the name of Freescale Semiconductor nor the
628 + * names of its contributors may be used to endorse or promote products
629 + * derived from this software without specific prior written permission.
630 + *
631 + *
632 + * ALTERNATIVELY, this software may be distributed under the terms of the
633 + * GNU General Public License ("GPL") as published by the Free Software
634 + * Foundation, either version 2 of that License or (at your option) any
635 + * later version.
636 + *
637 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
638 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
639 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
640 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
641 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
642 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
643 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
644 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
645 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
646 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
647 + */
648 +
649 +#include <linux/module.h>
650 +#include <linux/debugfs.h>
651 +#include "dpaa2-eth.h"
652 +#include "dpaa2-eth-debugfs.h"
653 +
654 +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
655 +
656 +static struct dentry *dpaa2_dbg_root;
657 +
658 +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
659 +{
660 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
661 + struct rtnl_link_stats64 *stats;
662 + struct dpaa2_eth_drv_stats *extras;
663 + int i;
664 +
665 + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
666 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
667 + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
668 + "Tx SG", "Enq busy");
669 +
670 + for_each_online_cpu(i) {
671 + stats = per_cpu_ptr(priv->percpu_stats, i);
672 + extras = per_cpu_ptr(priv->percpu_extras, i);
673 + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
674 + i,
675 + stats->rx_packets,
676 + stats->rx_errors,
677 + extras->rx_sg_frames,
678 + stats->tx_packets,
679 + stats->tx_errors,
680 + extras->tx_conf_frames,
681 + extras->tx_sg_frames,
682 + extras->tx_portal_busy);
683 + }
684 +
685 + return 0;
686 +}
687 +
688 +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
689 +{
690 + int err;
691 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
692 +
693 + err = single_open(file, dpaa2_dbg_cpu_show, priv);
694 + if (err < 0)
695 + netdev_err(priv->net_dev, "single_open() failed\n");
696 +
697 + return err;
698 +}
699 +
700 +static const struct file_operations dpaa2_dbg_cpu_ops = {
701 + .open = dpaa2_dbg_cpu_open,
702 + .read = seq_read,
703 + .llseek = seq_lseek,
704 + .release = single_release,
705 +};
706 +
707 +static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
708 +{
709 + switch (fq->type) {
710 + case DPAA2_RX_FQ:
711 + return "Rx";
712 + case DPAA2_TX_CONF_FQ:
713 + return "Tx conf";
714 + case DPAA2_RX_ERR_FQ:
715 + return "Rx err";
716 + default:
717 + return "N/A";
718 + }
719 +}
720 +
721 +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
722 +{
723 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
724 + struct dpaa2_eth_fq *fq;
725 + u32 fcnt, bcnt;
726 + int i, err;
727 +
728 + seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
729 + seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
730 + "VFQID", "CPU", "Type", "Frames", "Pending frames",
731 + "Congestion");
732 +
733 + for (i = 0; i < priv->num_fqs; i++) {
734 + fq = &priv->fq[i];
735 + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
736 + if (err)
737 + fcnt = 0;
738 +
739 + seq_printf(file, "%5d%16d%16s%16llu%16u%16llu\n",
740 + fq->fqid,
741 + fq->target_cpu,
742 + fq_type_to_str(fq),
743 + fq->stats.frames,
744 + fcnt,
745 + fq->stats.congestion_entry);
746 + }
747 +
748 + return 0;
749 +}
750 +
751 +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
752 +{
753 + int err;
754 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
755 +
756 + err = single_open(file, dpaa2_dbg_fqs_show, priv);
757 + if (err < 0)
758 + netdev_err(priv->net_dev, "single_open() failed\n");
759 +
760 + return err;
761 +}
762 +
763 +static const struct file_operations dpaa2_dbg_fq_ops = {
764 + .open = dpaa2_dbg_fqs_open,
765 + .read = seq_read,
766 + .llseek = seq_lseek,
767 + .release = single_release,
768 +};
769 +
770 +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
771 +{
772 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
773 + struct dpaa2_eth_channel *ch;
774 + int i;
775 +
776 + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
777 + seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
778 + "CHID", "CPU", "Deq busy", "Frames", "CDANs",
779 + "Avg frm/CDAN");
780 +
781 + for (i = 0; i < priv->num_channels; i++) {
782 + ch = priv->channel[i];
783 + seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n",
784 + ch->ch_id,
785 + ch->nctx.desired_cpu,
786 + ch->stats.dequeue_portal_busy,
787 + ch->stats.frames,
788 + ch->stats.cdan,
789 + ch->stats.frames / ch->stats.cdan);
790 + }
791 +
792 + return 0;
793 +}
794 +
795 +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
796 +{
797 + int err;
798 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
799 +
800 + err = single_open(file, dpaa2_dbg_ch_show, priv);
801 + if (err < 0)
802 + netdev_err(priv->net_dev, "single_open() failed\n");
803 +
804 + return err;
805 +}
806 +
807 +static const struct file_operations dpaa2_dbg_ch_ops = {
808 + .open = dpaa2_dbg_ch_open,
809 + .read = seq_read,
810 + .llseek = seq_lseek,
811 + .release = single_release,
812 +};
813 +
814 +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
815 + size_t count, loff_t *offset)
816 +{
817 + struct dpaa2_eth_priv *priv = file->private_data;
818 + struct rtnl_link_stats64 *percpu_stats;
819 + struct dpaa2_eth_drv_stats *percpu_extras;
820 + struct dpaa2_eth_fq *fq;
821 + struct dpaa2_eth_channel *ch;
822 + int i;
823 +
824 + for_each_online_cpu(i) {
825 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
826 + memset(percpu_stats, 0, sizeof(*percpu_stats));
827 +
828 + percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
829 + memset(percpu_extras, 0, sizeof(*percpu_extras));
830 + }
831 +
832 + for (i = 0; i < priv->num_fqs; i++) {
833 + fq = &priv->fq[i];
834 + memset(&fq->stats, 0, sizeof(fq->stats));
835 + }
836 +
837 + for (i = 0; i < priv->num_channels; i++) {
838 + ch = priv->channel[i];
839 + memset(&ch->stats, 0, sizeof(ch->stats));
840 + }
841 +
842 + return count;
843 +}
844 +
845 +static const struct file_operations dpaa2_dbg_reset_ops = {
846 + .open = simple_open,
847 + .write = dpaa2_dbg_reset_write,
848 +};
849 +
850 +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
851 + const char __user *buf,
852 + size_t count, loff_t *offset)
853 +{
854 + struct dpaa2_eth_priv *priv = file->private_data;
855 + int err;
856 +
857 + err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
858 + if (err)
859 + netdev_err(priv->net_dev,
860 + "dpni_reset_statistics() failed %d\n", err);
861 +
862 + return count;
863 +}
864 +
865 +static const struct file_operations dpaa2_dbg_reset_mc_ops = {
866 + .open = simple_open,
867 + .write = dpaa2_dbg_reset_mc_write,
868 +};
869 +
870 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
871 +{
872 + if (!dpaa2_dbg_root)
873 + return;
874 +
875 + /* Create a directory for the interface */
876 + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
877 + dpaa2_dbg_root);
878 + if (!priv->dbg.dir) {
879 + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
880 + return;
881 + }
882 +
883 + /* per-cpu stats file */
884 + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
885 + priv->dbg.dir, priv,
886 + &dpaa2_dbg_cpu_ops);
887 + if (!priv->dbg.cpu_stats) {
888 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
889 + goto err_cpu_stats;
890 + }
891 +
892 + /* per-fq stats file */
893 + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
894 + priv->dbg.dir, priv,
895 + &dpaa2_dbg_fq_ops);
896 + if (!priv->dbg.fq_stats) {
897 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
898 + goto err_fq_stats;
899 + }
900 +
901 + /* per-channel stats file */
902 + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
903 + priv->dbg.dir, priv,
904 + &dpaa2_dbg_ch_ops);
905 + if (!priv->dbg.ch_stats) {
906 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
907 + goto err_ch_stats;
908 + }
909 +
910 + /* reset stats */
911 + priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
912 + priv->dbg.dir, priv,
913 + &dpaa2_dbg_reset_ops);
914 + if (!priv->dbg.reset_stats) {
915 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
916 + goto err_reset_stats;
917 + }
918 +
919 + /* reset MC stats */
920 + priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
921 + 0222, priv->dbg.dir, priv,
922 + &dpaa2_dbg_reset_mc_ops);
923 + if (!priv->dbg.reset_mc_stats) {
924 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
925 + goto err_reset_mc_stats;
926 + }
927 +
928 + return;
929 +
930 +err_reset_mc_stats:
931 + debugfs_remove(priv->dbg.reset_stats);
932 +err_reset_stats:
933 + debugfs_remove(priv->dbg.ch_stats);
934 +err_ch_stats:
935 + debugfs_remove(priv->dbg.fq_stats);
936 +err_fq_stats:
937 + debugfs_remove(priv->dbg.cpu_stats);
938 +err_cpu_stats:
939 + debugfs_remove(priv->dbg.dir);
940 +}
941 +
942 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
943 +{
944 + debugfs_remove(priv->dbg.reset_mc_stats);
945 + debugfs_remove(priv->dbg.reset_stats);
946 + debugfs_remove(priv->dbg.fq_stats);
947 + debugfs_remove(priv->dbg.ch_stats);
948 + debugfs_remove(priv->dbg.cpu_stats);
949 + debugfs_remove(priv->dbg.dir);
950 +}
951 +
952 +void dpaa2_eth_dbg_init(void)
953 +{
954 + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
955 + if (!dpaa2_dbg_root) {
956 + pr_err("DPAA2-ETH: debugfs create failed\n");
957 + return;
958 + }
959 +
960 + pr_info("DPAA2-ETH: debugfs created\n");
961 +}
962 +
963 +void __exit dpaa2_eth_dbg_exit(void)
964 +{
965 + debugfs_remove(dpaa2_dbg_root);
966 +}
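
For context, the hooks above are meant to be driven from the core driver (the real call sites are in dpaa2-eth.c, added later in this patch): dpaa2_eth_dbg_init()/dpaa2_eth_dbg_exit() at module load/unload and dpaa2_dbg_add()/dpaa2_dbg_remove() per probed interface. A hypothetical wiring sketch, where only the dpaa2_* calls come from this file:

    static int __init example_driver_init(void)
    {
            dpaa2_eth_dbg_init();   /* creates /sys/kernel/debug/dpaa2-eth */
            return 0;
    }

    static void example_probe(struct dpaa2_eth_priv *priv)
    {
            dpaa2_dbg_add(priv);    /* per-netdev cpu_stats/fq_stats/ch_stats */
    }

    static void example_remove(struct dpaa2_eth_priv *priv)
    {
            dpaa2_dbg_remove(priv); /* removes the per-netdev directory */
    }

    static void __exit example_driver_exit(void)
    {
            dpaa2_eth_dbg_exit();   /* removes the debugfs root */
    }
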
967 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
968 new file mode 100644
969 index 00000000..551e6c4c
970 --- /dev/null
971 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
972 @@ -0,0 +1,60 @@
973 +/* Copyright 2015 Freescale Semiconductor Inc.
974 + *
975 + * Redistribution and use in source and binary forms, with or without
976 + * modification, are permitted provided that the following conditions are met:
977 + * * Redistributions of source code must retain the above copyright
978 + * notice, this list of conditions and the following disclaimer.
979 + * * Redistributions in binary form must reproduce the above copyright
980 + * notice, this list of conditions and the following disclaimer in the
981 + * documentation and/or other materials provided with the distribution.
982 + * * Neither the name of Freescale Semiconductor nor the
983 + * names of its contributors may be used to endorse or promote products
984 + * derived from this software without specific prior written permission.
985 + *
986 + *
987 + * ALTERNATIVELY, this software may be distributed under the terms of the
988 + * GNU General Public License ("GPL") as published by the Free Software
989 + * Foundation, either version 2 of that License or (at your option) any
990 + * later version.
991 + *
992 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
993 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
994 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
995 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
996 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
997 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
998 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
999 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1000 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1001 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1002 + */
1003 +
1004 +#ifndef DPAA2_ETH_DEBUGFS_H
1005 +#define DPAA2_ETH_DEBUGFS_H
1006 +
1007 +#include <linux/dcache.h>
1008 +
1009 +struct dpaa2_eth_priv;
1010 +
1011 +struct dpaa2_debugfs {
1012 + struct dentry *dir;
1013 + struct dentry *fq_stats;
1014 + struct dentry *ch_stats;
1015 + struct dentry *cpu_stats;
1016 + struct dentry *reset_stats;
1017 + struct dentry *reset_mc_stats;
1018 +};
1019 +
1020 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
1021 +void dpaa2_eth_dbg_init(void);
1022 +void dpaa2_eth_dbg_exit(void);
1023 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
1024 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
1025 +#else
1026 +static inline void dpaa2_eth_dbg_init(void) {}
1027 +static inline void dpaa2_eth_dbg_exit(void) {}
1028 +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
1029 +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
1030 +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
1031 +
1032 +#endif /* DPAA2_ETH_DEBUGFS_H */
1033 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1034 new file mode 100644
1035 index 00000000..e8e6522a
1036 --- /dev/null
1037 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1038 @@ -0,0 +1,184 @@
1039 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
1040 + *
1041 + * Redistribution and use in source and binary forms, with or without
1042 + * modification, are permitted provided that the following conditions are met:
1043 + * * Redistributions of source code must retain the above copyright
1044 + * notice, this list of conditions and the following disclaimer.
1045 + * * Redistributions in binary form must reproduce the above copyright
1046 + * notice, this list of conditions and the following disclaimer in the
1047 + * documentation and/or other materials provided with the distribution.
1048 + * * Neither the name of Freescale Semiconductor nor the
1049 + * names of its contributors may be used to endorse or promote products
1050 + * derived from this software without specific prior written permission.
1051 + *
1052 + *
1053 + * ALTERNATIVELY, this software may be distributed under the terms of the
1054 + * GNU General Public License ("GPL") as published by the Free Software
1055 + * Foundation, either version 2 of that License or (at your option) any
1056 + * later version.
1057 + *
1058 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1059 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1060 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1061 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1062 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1063 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1064 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1065 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1066 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1067 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1068 + */
1069 +
1070 +#undef TRACE_SYSTEM
1071 +#define TRACE_SYSTEM dpaa2_eth
1072 +
1073 +#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
1074 +#define _DPAA2_ETH_TRACE_H
1075 +
1076 +#include <linux/skbuff.h>
1077 +#include <linux/netdevice.h>
1078 +#include <linux/tracepoint.h>
1079 +
1080 +#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
1081 +/* trace_printk format for raw buffer event class */
1082 +#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
1083 +
1084 +/* This is used to declare a class of events.
1085 + * individual events of this type will be defined below.
1086 + */
1087 +
1088 +/* Store details about a frame descriptor */
1089 +DECLARE_EVENT_CLASS(dpaa2_eth_fd,
1090 + /* Trace function prototype */
1091 + TP_PROTO(struct net_device *netdev,
1092 + const struct dpaa2_fd *fd),
1093 +
1094 + /* Repeat argument list here */
1095 + TP_ARGS(netdev, fd),
1096 +
1097 + /* A structure containing the relevant information we want
1098 + * to record. Declare name and type for each normal element,
1099 + * name, type and size for arrays. Use __string for variable
1100 + * length strings.
1101 + */
1102 + TP_STRUCT__entry(
1103 + __field(u64, fd_addr)
1104 + __field(u32, fd_len)
1105 + __field(u16, fd_offset)
1106 + __string(name, netdev->name)
1107 + ),
1108 +
1109 + /* The function that assigns values to the above declared
1110 + * fields
1111 + */
1112 + TP_fast_assign(
1113 + __entry->fd_addr = dpaa2_fd_get_addr(fd);
1114 + __entry->fd_len = dpaa2_fd_get_len(fd);
1115 + __entry->fd_offset = dpaa2_fd_get_offset(fd);
1116 + __assign_str(name, netdev->name);
1117 + ),
1118 +
1119 + /* This is what gets printed when the trace event is
1120 + * triggered.
1121 + */
1122 + TP_printk(TR_FMT,
1123 + __get_str(name),
1124 + __entry->fd_addr,
1125 + __entry->fd_len,
1126 + __entry->fd_offset)
1127 +);
1128 +
1129 +/* Now declare events of the above type. Format is:
1130 + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
1131 + */
1132 +
1133 +/* Tx (egress) fd */
1134 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
1135 + TP_PROTO(struct net_device *netdev,
1136 + const struct dpaa2_fd *fd),
1137 +
1138 + TP_ARGS(netdev, fd)
1139 +);
1140 +
1141 +/* Rx fd */
1142 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
1143 + TP_PROTO(struct net_device *netdev,
1144 + const struct dpaa2_fd *fd),
1145 +
1146 + TP_ARGS(netdev, fd)
1147 +);
1148 +
1149 +/* Tx confirmation fd */
1150 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
1151 + TP_PROTO(struct net_device *netdev,
1152 + const struct dpaa2_fd *fd),
1153 +
1154 + TP_ARGS(netdev, fd)
1155 +);
1156 +
1157 +/* Log data about raw buffers. Useful for tracing DPBP content. */
1158 +TRACE_EVENT(dpaa2_eth_buf_seed,
1159 + /* Trace function prototype */
1160 + TP_PROTO(struct net_device *netdev,
1161 + /* virtual address and size */
1162 + void *vaddr,
1163 + size_t size,
1164 + /* dma map address and size */
1165 + dma_addr_t dma_addr,
1166 + size_t map_size,
1167 + /* buffer pool id, if relevant */
1168 + u16 bpid),
1169 +
1170 + /* Repeat argument list here */
1171 + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
1172 +
1173 + /* A structure containing the relevant information we want
1174 + * to record. Declare name and type for each normal element,
1175 + * name, type and size for arrays. Use __string for variable
1176 + * length strings.
1177 + */
1178 + TP_STRUCT__entry(
1179 + __field(void *, vaddr)
1180 + __field(size_t, size)
1181 + __field(dma_addr_t, dma_addr)
1182 + __field(size_t, map_size)
1183 + __field(u16, bpid)
1184 + __string(name, netdev->name)
1185 + ),
1186 +
1187 + /* The function that assigns values to the above declared
1188 + * fields
1189 + */
1190 + TP_fast_assign(
1191 + __entry->vaddr = vaddr;
1192 + __entry->size = size;
1193 + __entry->dma_addr = dma_addr;
1194 + __entry->map_size = map_size;
1195 + __entry->bpid = bpid;
1196 + __assign_str(name, netdev->name);
1197 + ),
1198 +
1199 + /* This is what gets printed when the trace event is
1200 + * triggered.
1201 + */
1202 + TP_printk(TR_BUF_FMT,
1203 + __get_str(name),
1204 + __entry->vaddr,
1205 + __entry->size,
1206 + &__entry->dma_addr,
1207 + __entry->map_size,
1208 + __entry->bpid)
1209 +);
1210 +
1211 +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
1212 + * The syntax is the same as for DECLARE_EVENT_CLASS().
1213 + */
1214 +
1215 +#endif /* _DPAA2_ETH_TRACE_H */
1216 +
1217 +/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
1218 +#undef TRACE_INCLUDE_PATH
1219 +#define TRACE_INCLUDE_PATH .
1220 +#undef TRACE_INCLUDE_FILE
1221 +#define TRACE_INCLUDE_FILE dpaa2-eth-trace
1222 +#include <trace/define_trace.h>
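
The events declared above are emitted from driver code with ordinary trace_<event>() calls; dpaa2-eth.c (next in this patch) does this on its Rx, Tx and Tx-confirmation paths. A trimmed sketch of the Rx case, with the runtime enable path assumed from TRACE_SYSTEM above and the usual tracefs mount point:

    #include "dpaa2-eth-trace.h"

    /* At the top of the Rx routine, right after the FD is dequeued */
    static void example_trace_rx(struct net_device *netdev,
                                 const struct dpaa2_fd *fd)
    {
            trace_dpaa2_rx_fd(netdev, fd);
    }

    /* The event can then be enabled at runtime with:
     *   echo 1 > /sys/kernel/debug/tracing/events/dpaa2_eth/dpaa2_rx_fd/enable
     */
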
1223 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1224 new file mode 100644
1225 index 00000000..452eca52
1226 --- /dev/null
1227 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1228 @@ -0,0 +1,3155 @@
1229 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
1230 + *
1231 + * Redistribution and use in source and binary forms, with or without
1232 + * modification, are permitted provided that the following conditions are met:
1233 + * * Redistributions of source code must retain the above copyright
1234 + * notice, this list of conditions and the following disclaimer.
1235 + * * Redistributions in binary form must reproduce the above copyright
1236 + * notice, this list of conditions and the following disclaimer in the
1237 + * documentation and/or other materials provided with the distribution.
1238 + * * Neither the name of Freescale Semiconductor nor the
1239 + * names of its contributors may be used to endorse or promote products
1240 + * derived from this software without specific prior written permission.
1241 + *
1242 + *
1243 + * ALTERNATIVELY, this software may be distributed under the terms of the
1244 + * GNU General Public License ("GPL") as published by the Free Software
1245 + * Foundation, either version 2 of that License or (at your option) any
1246 + * later version.
1247 + *
1248 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1249 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1250 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1251 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1252 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1253 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1254 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1255 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1256 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1257 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1258 + */
1259 +#include <linux/init.h>
1260 +#include <linux/module.h>
1261 +#include <linux/platform_device.h>
1262 +#include <linux/etherdevice.h>
1263 +#include <linux/of_net.h>
1264 +#include <linux/interrupt.h>
1265 +#include <linux/debugfs.h>
1266 +#include <linux/kthread.h>
1267 +#include <linux/msi.h>
1268 +#include <linux/net_tstamp.h>
1269 +#include <linux/iommu.h>
1270 +
1271 +#include "../../fsl-mc/include/dpbp.h"
1272 +#include "../../fsl-mc/include/dpcon.h"
1273 +#include "../../fsl-mc/include/mc.h"
1274 +#include "../../fsl-mc/include/mc-sys.h"
1275 +#include "dpaa2-eth.h"
1276 +#include "dpkg.h"
1277 +
1278 +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
1279 + * using these trace events only need to #include "dpaa2-eth-trace.h"
1280 + */
1281 +#define CREATE_TRACE_POINTS
1282 +#include "dpaa2-eth-trace.h"
1283 +
1284 +MODULE_LICENSE("Dual BSD/GPL");
1285 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
1286 +MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
1287 +
1288 +const char dpaa2_eth_drv_version[] = "0.1";
1289 +
1290 +void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr)
1291 +{
1292 + phys_addr_t phys_addr;
1293 +
1294 + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
1295 +
1296 + return phys_to_virt(phys_addr);
1297 +}
1298 +
1299 +static void validate_rx_csum(struct dpaa2_eth_priv *priv,
1300 + u32 fd_status,
1301 + struct sk_buff *skb)
1302 +{
1303 + skb_checksum_none_assert(skb);
1304 +
1305 + /* HW checksum validation is disabled, nothing to do here */
1306 + if (!(priv->net_dev->features & NETIF_F_RXCSUM))
1307 + return;
1308 +
1309 + /* Read checksum validation bits */
1310 + if (!((fd_status & DPAA2_FAS_L3CV) &&
1311 + (fd_status & DPAA2_FAS_L4CV)))
1312 + return;
1313 +
1314 + /* Inform the stack there's no need to compute L3/L4 csum anymore */
1315 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1316 +}
1317 +
1318 +/* Free a received FD.
1319 + * Not to be used for Tx conf FDs or on any other paths.
1320 + */
1321 +static void free_rx_fd(struct dpaa2_eth_priv *priv,
1322 + const struct dpaa2_fd *fd,
1323 + void *vaddr)
1324 +{
1325 + struct device *dev = priv->net_dev->dev.parent;
1326 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1327 + u8 fd_format = dpaa2_fd_get_format(fd);
1328 + struct dpaa2_sg_entry *sgt;
1329 + void *sg_vaddr;
1330 + int i;
1331 +
1332 + /* If single buffer frame, just free the data buffer */
1333 + if (fd_format == dpaa2_fd_single)
1334 + goto free_buf;
1335 + else if (fd_format != dpaa2_fd_sg)
1336 + /* we don't support any other format */
1337 + return;
1338 +
1339 + /* For S/G frames, we first need to free all SG entries */
1340 + sgt = vaddr + dpaa2_fd_get_offset(fd);
1341 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1342 + addr = dpaa2_sg_get_addr(&sgt[i]);
1343 + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1344 +
1345 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1346 + DMA_FROM_DEVICE);
1347 +
1348 + put_page(virt_to_head_page(sg_vaddr));
1349 +
1350 + if (dpaa2_sg_is_final(&sgt[i]))
1351 + break;
1352 + }
1353 +
1354 +free_buf:
1355 + put_page(virt_to_head_page(vaddr));
1356 +}
1357 +
1358 +/* Build a linear skb based on a single-buffer frame descriptor */
1359 +static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
1360 + struct dpaa2_eth_channel *ch,
1361 + const struct dpaa2_fd *fd,
1362 + void *fd_vaddr)
1363 +{
1364 + struct sk_buff *skb = NULL;
1365 + u16 fd_offset = dpaa2_fd_get_offset(fd);
1366 + u32 fd_length = dpaa2_fd_get_len(fd);
1367 +
1368 + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
1369 + if (unlikely(!skb))
1370 + return NULL;
1371 +
1372 + skb_reserve(skb, fd_offset);
1373 + skb_put(skb, fd_length);
1374 +
1375 + ch->buf_count--;
1376 +
1377 + return skb;
1378 +}
1379 +
1380 +/* Build a non linear (fragmented) skb based on a S/G table */
1381 +static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
1382 + struct dpaa2_eth_channel *ch,
1383 + struct dpaa2_sg_entry *sgt)
1384 +{
1385 + struct sk_buff *skb = NULL;
1386 + struct device *dev = priv->net_dev->dev.parent;
1387 + void *sg_vaddr;
1388 + dma_addr_t sg_addr;
1389 + u16 sg_offset;
1390 + u32 sg_length;
1391 + struct page *page, *head_page;
1392 + int page_offset;
1393 + int i;
1394 +
1395 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1396 + struct dpaa2_sg_entry *sge = &sgt[i];
1397 +
1398 + /* NOTE: We only support SG entries in dpaa2_sg_single format,
1399 + * but this is the only format we may receive from HW anyway
1400 + */
1401 +
1402 + /* Get the address and length from the S/G entry */
1403 + sg_addr = dpaa2_sg_get_addr(sge);
1404 + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr);
1405 + dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
1406 + DMA_FROM_DEVICE);
1407 +
1408 + sg_length = dpaa2_sg_get_len(sge);
1409 +
1410 + if (i == 0) {
1411 + /* We build the skb around the first data buffer */
1412 + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
1413 + if (unlikely(!skb))
1414 + return NULL;
1415 +
1416 + sg_offset = dpaa2_sg_get_offset(sge);
1417 + skb_reserve(skb, sg_offset);
1418 + skb_put(skb, sg_length);
1419 + } else {
1420 + /* Rest of the data buffers are stored as skb frags */
1421 + page = virt_to_page(sg_vaddr);
1422 + head_page = virt_to_head_page(sg_vaddr);
1423 +
1424 + /* Offset in page (which may be compound).
1425 + * Data in subsequent SG entries is stored from the
1426 + * beginning of the buffer, so we don't need to add the
1427 + * sg_offset.
1428 + */
1429 + page_offset = ((unsigned long)sg_vaddr &
1430 + (PAGE_SIZE - 1)) +
1431 + (page_address(page) - page_address(head_page));
1432 +
1433 + skb_add_rx_frag(skb, i - 1, head_page, page_offset,
1434 + sg_length, DPAA2_ETH_RX_BUF_SIZE);
1435 + }
1436 +
1437 + if (dpaa2_sg_is_final(sge))
1438 + break;
1439 + }
1440 +
1441 + /* Count all data buffers + SG table buffer */
1442 + ch->buf_count -= i + 2;
1443 +
1444 + return skb;
1445 +}
1446 +
1447 +/* Main Rx frame processing routine */
1448 +static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
1449 + struct dpaa2_eth_channel *ch,
1450 + const struct dpaa2_fd *fd,
1451 + struct napi_struct *napi,
1452 + u16 queue_id)
1453 +{
1454 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1455 + u8 fd_format = dpaa2_fd_get_format(fd);
1456 + void *vaddr;
1457 + struct sk_buff *skb;
1458 + struct rtnl_link_stats64 *percpu_stats;
1459 + struct dpaa2_eth_drv_stats *percpu_extras;
1460 + struct device *dev = priv->net_dev->dev.parent;
1461 + struct dpaa2_fas *fas;
1462 + void *buf_data;
1463 + u32 status = 0;
1464 +
1465 + /* Tracing point */
1466 + trace_dpaa2_rx_fd(priv->net_dev, fd);
1467 +
1468 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1469 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
1470 +
1471 + /* HWA - FAS, timestamp */
1472 + fas = dpaa2_eth_get_fas(vaddr);
1473 + prefetch(fas);
1474 + /* data / SG table */
1475 + buf_data = vaddr + dpaa2_fd_get_offset(fd);
1476 + prefetch(buf_data);
1477 +
1478 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1479 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
1480 +
1481 + switch (fd_format) {
1482 + case dpaa2_fd_single:
1483 + skb = build_linear_skb(priv, ch, fd, vaddr);
1484 + break;
1485 + case dpaa2_fd_sg:
1486 + skb = build_frag_skb(priv, ch, buf_data);
1487 + put_page(virt_to_head_page(vaddr));
1488 + percpu_extras->rx_sg_frames++;
1489 + percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
1490 + break;
1491 + default:
1492 + /* We don't support any other format */
1493 + goto err_frame_format;
1494 + }
1495 +
1496 + if (unlikely(!skb))
1497 + goto err_build_skb;
1498 +
1499 + prefetch(skb->data);
1500 +
1501 + /* Get the timestamp value */
1502 + if (priv->ts_rx_en) {
1503 + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
1504 + u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr);
1505 +
1506 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
1507 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
1508 + shhwtstamps->hwtstamp = ns_to_ktime(*ns);
1509 + }
1510 +
1511 + /* Check if we need to validate the L4 csum */
1512 + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
1513 + status = le32_to_cpu(fas->status);
1514 + validate_rx_csum(priv, status, skb);
1515 + }
1516 +
1517 + skb->protocol = eth_type_trans(skb, priv->net_dev);
1518 +
1519 + /* Record Rx queue - this will be used when picking a Tx queue to
1520 + * forward the frames. We're keeping flow affinity through the
1521 + * network stack.
1522 + */
1523 + skb_record_rx_queue(skb, queue_id);
1524 +
1525 + percpu_stats->rx_packets++;
1526 + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
1527 +
1528 + napi_gro_receive(napi, skb);
1529 +
1530 + return;
1531 +
1532 +err_build_skb:
1533 + free_rx_fd(priv, fd, vaddr);
1534 +err_frame_format:
1535 + percpu_stats->rx_dropped++;
1536 +}
1537 +
1538 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
1539 +/* Processing of Rx frames received on the error FQ
1540 + * We check and print the error bits and then free the frame
1541 + */
1542 +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
1543 + struct dpaa2_eth_channel *ch,
1544 + const struct dpaa2_fd *fd,
1545 + struct napi_struct *napi __always_unused,
1546 + u16 queue_id __always_unused)
1547 +{
1548 + struct device *dev = priv->net_dev->dev.parent;
1549 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1550 + void *vaddr;
1551 + struct rtnl_link_stats64 *percpu_stats;
1552 + struct dpaa2_fas *fas;
1553 + u32 status = 0;
1554 + bool check_fas_errors = false;
1555 +
1556 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1557 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
1558 +
1559 + /* check frame errors in the FD field */
1560 + if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) {
1561 + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
1562 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
1563 + if (net_ratelimit())
1564 +                      netdev_dbg(priv->net_dev, "Rx frame FD err: 0x%08x\n",
1565 + fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK);
1566 + }
1567 +
1568 + /* check frame errors in the FAS field */
1569 + if (check_fas_errors) {
1570 + fas = dpaa2_eth_get_fas(vaddr);
1571 + status = le32_to_cpu(fas->status);
1572 + if (net_ratelimit())
1573 + netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
1574 + status & DPAA2_FAS_RX_ERR_MASK);
1575 + }
1576 + free_rx_fd(priv, fd, vaddr);
1577 +
1578 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1579 + percpu_stats->rx_errors++;
1580 +}
1581 +#endif
1582 +
1583 +/* Consume all frames pull-dequeued into the store. This is the simplest way to
1584 + * make sure we don't accidentally issue another volatile dequeue which would
1585 + * overwrite (leak) frames already in the store.
1586 + *
1587 + * The number of frames is returned using the last 2 output arguments,
1588 + * separately for Rx and Tx confirmations.
1589 + *
1590 + * Observance of NAPI budget is not our concern, leaving that to the caller.
1591 + */
1592 +static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
1593 + int *tx_conf_cleaned)
1594 +{
1595 + struct dpaa2_eth_priv *priv = ch->priv;
1596 + struct dpaa2_eth_fq *fq = NULL;
1597 + struct dpaa2_dq *dq;
1598 + const struct dpaa2_fd *fd;
1599 + int cleaned = 0;
1600 + int is_last;
1601 +
1602 + do {
1603 + dq = dpaa2_io_store_next(ch->store, &is_last);
1604 + if (unlikely(!dq)) {
1605 + /* If we're here, we *must* have placed a
1606 +                       * volatile dequeue command, so keep reading through
1607 + * the store until we get some sort of valid response
1608 + * token (either a valid frame or an "empty dequeue")
1609 + */
1610 + continue;
1611 + }
1612 +
1613 + fd = dpaa2_dq_fd(dq);
1614 +
1615 + /* prefetch the frame descriptor */
1616 + prefetch(fd);
1617 +
1618 + fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
1619 + fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
1620 + cleaned++;
1621 + } while (!is_last);
1622 +
1623 + if (!cleaned)
1624 + return false;
1625 +
1626 + /* All frames brought in store by a volatile dequeue
1627 + * come from the same queue
1628 + */
1629 + if (fq->type == DPAA2_TX_CONF_FQ)
1630 + *tx_conf_cleaned += cleaned;
1631 + else
1632 + *rx_cleaned += cleaned;
1633 +
1634 + fq->stats.frames += cleaned;
1635 + ch->stats.frames += cleaned;
1636 +
1637 + return true;
1638 +}
1639 +
1640 +/* Configure the egress frame annotation for timestamp update */
1641 +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
1642 +{
1643 + struct dpaa2_faead *faead;
1644 + u32 ctrl;
1645 + u32 frc;
1646 +
1647 + /* Mark the egress frame annotation area as valid */
1648 + frc = dpaa2_fd_get_frc(fd);
1649 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
1650 +
1651 +  /* enable UPD (update prepended data) bit in FAEAD field of
1652 + * hardware frame annotation area
1653 + */
1654 + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
1655 + faead = dpaa2_eth_get_faead(buf_start);
1656 + faead->ctrl = cpu_to_le32(ctrl);
1657 +}
1658 +
1659 +/* Create a frame descriptor based on a fragmented skb */
1660 +static int build_sg_fd(struct dpaa2_eth_priv *priv,
1661 + struct sk_buff *skb,
1662 + struct dpaa2_fd *fd)
1663 +{
1664 + struct device *dev = priv->net_dev->dev.parent;
1665 + void *sgt_buf = NULL;
1666 + dma_addr_t addr;
1667 + int nr_frags = skb_shinfo(skb)->nr_frags;
1668 + struct dpaa2_sg_entry *sgt;
1669 + int i, err;
1670 + int sgt_buf_size;
1671 + struct scatterlist *scl, *crt_scl;
1672 + int num_sg;
1673 + int num_dma_bufs;
1674 + struct dpaa2_fas *fas;
1675 + struct dpaa2_eth_swa *swa;
1676 +
1677 + /* Create and map scatterlist.
1678 + * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
1679 + * to go beyond nr_frags+1.
1680 + * Note: We don't support chained scatterlists
1681 + */
1682 + if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
1683 + return -EINVAL;
1684 +
1685 + scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
1686 + if (unlikely(!scl))
1687 + return -ENOMEM;
1688 +
1689 + sg_init_table(scl, nr_frags + 1);
1690 + num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
1691 + num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1692 + if (unlikely(!num_dma_bufs)) {
1693 + err = -ENOMEM;
1694 + goto dma_map_sg_failed;
1695 + }
1696 +
1697 + /* Prepare the HW SGT structure */
1698 + sgt_buf_size = priv->tx_data_offset +
1699 + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
1700 + sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
1701 + if (unlikely(!sgt_buf)) {
1702 + err = -ENOMEM;
1703 + goto sgt_buf_alloc_failed;
1704 + }
1705 + sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
1706 +
1707 + /* PTA from egress side is passed as is to the confirmation side so
1708 + * we need to clear some fields here in order to find consistent values
1709 + * on TX confirmation. We are clearing FAS (Frame Annotation Status)
1710 + * field from the hardware annotation area
1711 + */
1712 + fas = dpaa2_eth_get_fas(sgt_buf);
1713 + memset(fas, 0, DPAA2_FAS_SIZE);
1714 +
1715 + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1716 +
1717 + /* Fill in the HW SGT structure.
1718 + *
1719 + * sgt_buf is zeroed out, so the following fields are implicit
1720 + * in all sgt entries:
1721 + * - offset is 0
1722 + * - format is 'dpaa2_sg_single'
1723 + */
1724 + for_each_sg(scl, crt_scl, num_dma_bufs, i) {
1725 + dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
1726 + dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
1727 + }
1728 + dpaa2_sg_set_final(&sgt[i - 1], true);
1729 +
1730 + /* Store the skb backpointer in the SGT buffer.
1731 + * Fit the scatterlist and the number of buffers alongside the
1732 + * skb backpointer in the software annotation area. We'll need
1733 + * all of them on Tx Conf.
1734 + */
1735 + swa = (struct dpaa2_eth_swa *)sgt_buf;
1736 + swa->skb = skb;
1737 + swa->scl = scl;
1738 + swa->num_sg = num_sg;
1739 + swa->num_dma_bufs = num_dma_bufs;
1740 +
1741 + /* Separately map the SGT buffer */
1742 + addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1743 + if (unlikely(dma_mapping_error(dev, addr))) {
1744 + err = -ENOMEM;
1745 + goto dma_map_single_failed;
1746 + }
1747 + dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1748 + dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1749 + dpaa2_fd_set_addr(fd, addr);
1750 + dpaa2_fd_set_len(fd, skb->len);
1751 +
1752 + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
1753 +
1754 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1755 + enable_tx_tstamp(fd, sgt_buf);
1756 +
1757 + return 0;
1758 +
1759 +dma_map_single_failed:
1760 + kfree(sgt_buf);
1761 +sgt_buf_alloc_failed:
1762 + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1763 +dma_map_sg_failed:
1764 + kfree(scl);
1765 + return err;
1766 +}
1767 +
1768 +/* Create a frame descriptor based on a linear skb */
1769 +static int build_single_fd(struct dpaa2_eth_priv *priv,
1770 + struct sk_buff *skb,
1771 + struct dpaa2_fd *fd)
1772 +{
1773 + struct device *dev = priv->net_dev->dev.parent;
1774 + u8 *buffer_start;
1775 + struct sk_buff **skbh;
1776 + dma_addr_t addr;
1777 + struct dpaa2_fas *fas;
1778 +
1779 + buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
1780 + DPAA2_ETH_TX_BUF_ALIGN,
1781 + DPAA2_ETH_TX_BUF_ALIGN);
1782 +
1783 + /* PTA from egress side is passed as is to the confirmation side so
1784 + * we need to clear some fields here in order to find consistent values
1785 + * on TX confirmation. We are clearing FAS (Frame Annotation Status)
1786 + * field from the hardware annotation area
1787 + */
1788 + fas = dpaa2_eth_get_fas(buffer_start);
1789 + memset(fas, 0, DPAA2_FAS_SIZE);
1790 +
1791 + /* Store a backpointer to the skb at the beginning of the buffer
1792 + * (in the private data area) such that we can release it
1793 + * on Tx confirm
1794 + */
1795 + skbh = (struct sk_buff **)buffer_start;
1796 + *skbh = skb;
1797 +
1798 + addr = dma_map_single(dev, buffer_start,
1799 + skb_tail_pointer(skb) - buffer_start,
1800 + DMA_BIDIRECTIONAL);
1801 + if (unlikely(dma_mapping_error(dev, addr)))
1802 + return -ENOMEM;
1803 +
1804 + dpaa2_fd_set_addr(fd, addr);
1805 + dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
1806 + dpaa2_fd_set_len(fd, skb->len);
1807 + dpaa2_fd_set_format(fd, dpaa2_fd_single);
1808 +
1809 + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
1810 +
1811 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1812 + enable_tx_tstamp(fd, buffer_start);
1813 +
1814 + return 0;
1815 +}
1816 +
1817 +/* FD freeing routine on the Tx path
1818 + *
1819 + * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
1820 + * back-pointed to is also freed.
1821 + * This can be called either from dpaa2_eth_tx_conf() or on the error path of
1822 + * dpaa2_eth_tx().
1823 + * Optionally, return the frame annotation status word (FAS), which needs
1824 + * to be checked if we're on the confirmation path.
1825 + */
1826 +static void free_tx_fd(const struct dpaa2_eth_priv *priv,
1827 + const struct dpaa2_fd *fd,
1828 + u32 *status)
1829 +{
1830 + struct device *dev = priv->net_dev->dev.parent;
1831 + dma_addr_t fd_addr;
1832 + struct sk_buff **skbh, *skb;
1833 + unsigned char *buffer_start;
1834 + int unmap_size;
1835 + struct scatterlist *scl;
1836 + int num_sg, num_dma_bufs;
1837 + struct dpaa2_eth_swa *swa;
1838 + u8 fd_format = dpaa2_fd_get_format(fd);
1839 + struct dpaa2_fas *fas;
1840 +
1841 + fd_addr = dpaa2_fd_get_addr(fd);
1842 + skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr);
1843 +
1844 + /* HWA - FAS, timestamp (for Tx confirmation frames) */
1845 + fas = dpaa2_eth_get_fas(skbh);
1846 + prefetch(fas);
1847 +
1848 + switch (fd_format) {
1849 + case dpaa2_fd_single:
1850 + skb = *skbh;
1851 + buffer_start = (unsigned char *)skbh;
1852 + /* Accessing the skb buffer is safe before dma unmap, because
1853 + * we didn't map the actual skb shell.
1854 + */
1855 + dma_unmap_single(dev, fd_addr,
1856 + skb_tail_pointer(skb) - buffer_start,
1857 + DMA_BIDIRECTIONAL);
1858 + break;
1859 + case dpaa2_fd_sg:
1860 + swa = (struct dpaa2_eth_swa *)skbh;
1861 + skb = swa->skb;
1862 + scl = swa->scl;
1863 + num_sg = swa->num_sg;
1864 + num_dma_bufs = swa->num_dma_bufs;
1865 +
1866 + /* Unmap the scatterlist */
1867 + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1868 + kfree(scl);
1869 +
1870 + /* Unmap the SGT buffer */
1871 + unmap_size = priv->tx_data_offset +
1872 + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
1873 + dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
1874 + break;
1875 + default:
1876 + /* Unsupported format, mark it as errored and give up */
1877 + if (status)
1878 + *status = ~0;
1879 + return;
1880 + }
1881 +
1882 + /* Get the timestamp value */
1883 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1884 + struct skb_shared_hwtstamps shhwtstamps;
1885 + u64 *ns;
1886 +
1887 + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1888 +
1889 + ns = (u64 *)dpaa2_eth_get_ts(skbh);
1890 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
1891 + shhwtstamps.hwtstamp = ns_to_ktime(*ns);
1892 + skb_tstamp_tx(skb, &shhwtstamps);
1893 + }
1894 +
1895 + /* Read the status from the Frame Annotation after we unmap the first
1896 + * buffer but before we free it. The caller function is responsible
1897 + * for checking the status value.
1898 + */
1899 + if (status)
1900 + *status = le32_to_cpu(fas->status);
1901 +
1902 + /* Free SGT buffer kmalloc'ed on tx */
1903 + if (fd_format != dpaa2_fd_single)
1904 + kfree(skbh);
1905 +
1906 + /* Move on with skb release */
1907 + dev_kfree_skb(skb);
1908 +}
1909 +
1910 +static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1911 +{
1912 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1913 + struct device *dev = net_dev->dev.parent;
1914 + struct dpaa2_fd fd;
1915 + struct rtnl_link_stats64 *percpu_stats;
1916 + struct dpaa2_eth_drv_stats *percpu_extras;
1917 + struct dpaa2_eth_fq *fq;
1918 + u16 queue_mapping = skb_get_queue_mapping(skb);
1919 + int err, i;
1920 +
1921 + /* If we're congested, stop this tx queue; transmission of the
1922 + * current skb happens regardless of congestion state
1923 + */
1924 + fq = &priv->fq[queue_mapping];
1925 +
1926 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
1927 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
1928 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
1929 + netif_stop_subqueue(net_dev, queue_mapping);
1930 + fq->stats.congestion_entry++;
1931 + }
1932 +
1933 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1934 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
1935 +
1936 + if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
1937 + struct sk_buff *ns;
1938 +
1939 + ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
1940 + if (unlikely(!ns)) {
1941 + percpu_stats->tx_dropped++;
1942 + goto err_alloc_headroom;
1943 + }
1944 + dev_kfree_skb(skb);
1945 + skb = ns;
1946 + }
1947 +
1948 + /* We'll be holding a back-reference to the skb until Tx Confirmation;
1949 + * we don't want that overwritten by a concurrent Tx with a cloned skb.
1950 + */
1951 + skb = skb_unshare(skb, GFP_ATOMIC);
1952 + if (unlikely(!skb)) {
1953 + /* skb_unshare() has already freed the skb */
1954 + percpu_stats->tx_dropped++;
1955 + return NETDEV_TX_OK;
1956 + }
1957 +
1958 + /* Setup the FD fields */
1959 + memset(&fd, 0, sizeof(fd));
1960 +
1961 + if (skb_is_nonlinear(skb)) {
1962 + err = build_sg_fd(priv, skb, &fd);
1963 + percpu_extras->tx_sg_frames++;
1964 + percpu_extras->tx_sg_bytes += skb->len;
1965 + } else {
1966 + err = build_single_fd(priv, skb, &fd);
1967 + }
1968 +
1969 + if (unlikely(err)) {
1970 + percpu_stats->tx_dropped++;
1971 + goto err_build_fd;
1972 + }
1973 +
1974 + /* Tracing point */
1975 + trace_dpaa2_tx_fd(net_dev, &fd);
1976 +
1977 + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1978 + err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
1979 + fq->tx_qdbin, &fd);
1980 + /* TODO: This doesn't work. Check on simulator.
1981 + * err = dpaa2_io_service_enqueue_fq(NULL,
1982 + * priv->fq[0].fqid_tx, &fd);
1983 + */
1984 + if (err != -EBUSY)
1985 + break;
1986 + }
1987 + percpu_extras->tx_portal_busy += i;
1988 + if (unlikely(err < 0)) {
1989 + percpu_stats->tx_errors++;
1990 + /* Clean up everything, including freeing the skb */
1991 + free_tx_fd(priv, &fd, NULL);
1992 + } else {
1993 + percpu_stats->tx_packets++;
1994 + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
1995 + }
1996 +
1997 + return NETDEV_TX_OK;
1998 +
1999 +err_build_fd:
2000 +err_alloc_headroom:
2001 + dev_kfree_skb(skb);
2002 +
2003 + return NETDEV_TX_OK;
2004 +}
2005 +
2006 +/* Tx confirmation frame processing routine */
2007 +static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
2008 + struct dpaa2_eth_channel *ch,
2009 + const struct dpaa2_fd *fd,
2010 + struct napi_struct *napi __always_unused,
2011 + u16 queue_id)
2012 +{
2013 + struct device *dev = priv->net_dev->dev.parent;
2014 + struct rtnl_link_stats64 *percpu_stats;
2015 + struct dpaa2_eth_drv_stats *percpu_extras;
2016 + u32 status = 0;
2017 + bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
2018 + bool check_fas_errors = false;
2019 +
2020 + /* Tracing point */
2021 + trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
2022 +
2023 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
2024 + percpu_extras->tx_conf_frames++;
2025 + percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
2026 +
2027 + /* Check congestion state and wake all queues if necessary */
2028 + if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
2029 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
2030 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
2031 + if (!dpaa2_cscn_state_congested(priv->cscn_mem))
2032 + netif_tx_wake_all_queues(priv->net_dev);
2033 + }
2034 +
2035 + /* check frame errors in the FD field */
2036 + if (unlikely(errors)) {
2037 + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
2038 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2039 + if (net_ratelimit())
2040 +                  netdev_dbg(priv->net_dev, "Tx frame FD err: 0x%08x\n",
2041 + fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
2042 + }
2043 +
2044 + free_tx_fd(priv, fd, check_fas_errors ? &status : NULL);
2045 +
2046 + /* if there are no errors, we're done */
2047 + if (likely(!errors))
2048 + return;
2049 +
2050 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2051 + /* Tx-conf logically pertains to the egress path. */
2052 + percpu_stats->tx_errors++;
2053 +
2054 + if (net_ratelimit())
2055 +          netdev_dbg(priv->net_dev, "Tx frame FAS err: 0x%08x\n",
2056 + status & DPAA2_FAS_TX_ERR_MASK);
2057 +}
2058 +
2059 +static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
2060 +{
2061 + int err;
2062 +
2063 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2064 + DPNI_OFF_RX_L3_CSUM, enable);
2065 + if (err) {
2066 + netdev_err(priv->net_dev,
2067 + "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
2068 + return err;
2069 + }
2070 +
2071 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2072 + DPNI_OFF_RX_L4_CSUM, enable);
2073 + if (err) {
2074 + netdev_err(priv->net_dev,
2075 + "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
2076 + return err;
2077 + }
2078 +
2079 + return 0;
2080 +}
2081 +
2082 +static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
2083 +{
2084 + int err;
2085 +
2086 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2087 + DPNI_OFF_TX_L3_CSUM, enable);
2088 + if (err) {
2089 + netdev_err(priv->net_dev,
2090 +                     "dpni_set_offload() DPNI_OFF_TX_L3_CSUM failed\n");
2091 + return err;
2092 + }
2093 +
2094 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2095 + DPNI_OFF_TX_L4_CSUM, enable);
2096 + if (err) {
2097 + netdev_err(priv->net_dev,
2098 +                     "dpni_set_offload() DPNI_OFF_TX_L4_CSUM failed\n");
2099 + return err;
2100 + }
2101 +
2102 + return 0;
2103 +}
2104 +
2105 +/* Perform a single release command to add buffers
2106 + * to the specified buffer pool
2107 + */
2108 +static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
2109 +{
2110 + struct device *dev = priv->net_dev->dev.parent;
2111 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2112 + void *buf;
2113 + dma_addr_t addr;
2114 + int i;
2115 +
2116 + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
2117 + /* Allocate buffer visible to WRIOP + skb shared info +
2118 + * alignment padding.
2119 + */
2120 + buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv));
2121 + if (unlikely(!buf))
2122 + goto err_alloc;
2123 +
2124 + buf = PTR_ALIGN(buf, priv->rx_buf_align);
2125 +
2126 + addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
2127 + DMA_FROM_DEVICE);
2128 + if (unlikely(dma_mapping_error(dev, addr)))
2129 + goto err_map;
2130 +
2131 + buf_array[i] = addr;
2132 +
2133 + /* tracing point */
2134 + trace_dpaa2_eth_buf_seed(priv->net_dev,
2135 + buf, DPAA2_ETH_BUF_RAW_SIZE(priv),
2136 + addr, DPAA2_ETH_RX_BUF_SIZE,
2137 + bpid);
2138 + }
2139 +
2140 +release_bufs:
2141 + /* In case the portal is busy, retry until successful.
2142 + * The buffer release function would only fail if the QBMan portal
2143 + * was busy, which implies portal contention (i.e. more CPUs than
2144 + * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
2145 + * there is little we can realistically do, short of giving up -
2146 + * in which case we'd risk depleting the buffer pool and never again
2147 + * receiving the Rx interrupt which would kick-start the refill logic.
2148 + * So just keep retrying, at the risk of being moved to ksoftirqd.
2149 + */
2150 + while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
2151 + cpu_relax();
2152 + return i;
2153 +
2154 +err_map:
2155 + put_page(virt_to_head_page(buf));
2156 +err_alloc:
2157 + if (i)
2158 + goto release_bufs;
2159 +
2160 + return 0;
2161 +}
2162 +
2163 +static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
2164 +{
2165 + int i, j;
2166 + int new_count;
2167 +
2168 + /* This is the lazy seeding of Rx buffer pools.
2169 +   * add_bufs() is also used on the Rx hotpath and calls
2170 + * napi_alloc_frag(). The trouble with that is that it in turn ends up
2171 + * calling this_cpu_ptr(), which mandates execution in atomic context.
2172 + * Rather than splitting up the code, do a one-off preempt disable.
2173 + */
2174 + preempt_disable();
2175 + for (j = 0; j < priv->num_channels; j++) {
2176 + priv->channel[j]->buf_count = 0;
2177 + for (i = 0; i < priv->num_bufs;
2178 + i += DPAA2_ETH_BUFS_PER_CMD) {
2179 + new_count = add_bufs(priv, bpid);
2180 + priv->channel[j]->buf_count += new_count;
2181 +
2182 + if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
2183 + preempt_enable();
2184 + return -ENOMEM;
2185 + }
2186 + }
2187 + }
2188 + preempt_enable();
2189 +
2190 + return 0;
2191 +}
2192 +
2193 +/**
2194 + * Drain the specified number of buffers from the DPNI's private buffer pool.
2195 + * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
2196 + */
2197 +static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
2198 +{
2199 + struct device *dev = priv->net_dev->dev.parent;
2200 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2201 + void *vaddr;
2202 + int ret, i;
2203 +
2204 + do {
2205 + ret = dpaa2_io_service_acquire(NULL, priv->bpid,
2206 + buf_array, count);
2207 + if (ret < 0) {
2208 + netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
2209 + return;
2210 + }
2211 + for (i = 0; i < ret; i++) {
2212 + /* Same logic as on regular Rx path */
2213 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain,
2214 + buf_array[i]);
2215 + dma_unmap_single(dev, buf_array[i],
2216 + DPAA2_ETH_RX_BUF_SIZE,
2217 + DMA_FROM_DEVICE);
2218 + put_page(virt_to_head_page(vaddr));
2219 + }
2220 + } while (ret);
2221 +}
2222 +
2223 +static void drain_pool(struct dpaa2_eth_priv *priv)
2224 +{
2225 + preempt_disable();
2226 + drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
2227 + drain_bufs(priv, 1);
2228 + preempt_enable();
2229 +}
2230 +
2231 +/* Function is called from softirq context only, so we don't need to guard
2232 + * the access to percpu count
2233 + */
2234 +static int refill_pool(struct dpaa2_eth_priv *priv,
2235 + struct dpaa2_eth_channel *ch,
2236 + u16 bpid)
2237 +{
2238 + int new_count;
2239 +
2240 + if (likely(ch->buf_count >= priv->refill_thresh))
2241 + return 0;
2242 +
2243 + do {
2244 + new_count = add_bufs(priv, bpid);
2245 + if (unlikely(!new_count)) {
2246 + /* Out of memory; abort for now, we'll try later on */
2247 + break;
2248 + }
2249 + ch->buf_count += new_count;
2250 + } while (ch->buf_count < priv->num_bufs);
2251 +
2252 + if (unlikely(ch->buf_count < priv->num_bufs))
2253 + return -ENOMEM;
2254 +
2255 + return 0;
2256 +}
2257 +
2258 +static int pull_channel(struct dpaa2_eth_channel *ch)
2259 +{
2260 + int err;
2261 + int dequeues = -1;
2262 +
2263 + /* Retry while portal is busy */
2264 + do {
2265 + err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
2266 + dequeues++;
2267 + cpu_relax();
2268 + } while (err == -EBUSY);
2269 +
2270 + ch->stats.dequeue_portal_busy += dequeues;
2271 + if (unlikely(err))
2272 + ch->stats.pull_err++;
2273 +
2274 + return err;
2275 +}
2276 +
2277 +/* NAPI poll routine
2278 + *
2279 + * Frames are dequeued from the QMan channel associated with this NAPI context.
2280 + * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
2281 + * confirmation frames are limited by a threshold per NAPI poll cycle.
2282 + */
2283 +static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
2284 +{
2285 + struct dpaa2_eth_channel *ch;
2286 + int rx_cleaned = 0, tx_conf_cleaned = 0;
2287 + bool store_cleaned;
2288 + struct dpaa2_eth_priv *priv;
2289 + int err;
2290 +
2291 + ch = container_of(napi, struct dpaa2_eth_channel, napi);
2292 + priv = ch->priv;
2293 +
2294 + do {
2295 + err = pull_channel(ch);
2296 + if (unlikely(err))
2297 + break;
2298 +
2299 + /* Refill pool if appropriate */
2300 + refill_pool(priv, ch, priv->bpid);
2301 +
2302 + store_cleaned = consume_frames(ch, &rx_cleaned,
2303 + &tx_conf_cleaned);
2304 +
2305 + /* If we've either consumed the budget with Rx frames,
2306 + * or reached the Tx conf threshold, we're done.
2307 + */
2308 + if (rx_cleaned >= budget ||
2309 + tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
2310 + return budget;
2311 + } while (store_cleaned);
2312 +
2313 + /* We didn't consume the entire budget, finish napi and
2314 + * re-enable data availability notifications.
2315 + */
2316 + napi_complete(napi);
2317 + do {
2318 + err = dpaa2_io_service_rearm(NULL, &ch->nctx);
2319 + cpu_relax();
2320 + } while (err == -EBUSY);
2321 +
2322 + return max(rx_cleaned, 1);
2323 +}
2324 +
2325 +static void enable_ch_napi(struct dpaa2_eth_priv *priv)
2326 +{
2327 + struct dpaa2_eth_channel *ch;
2328 + int i;
2329 +
2330 + for (i = 0; i < priv->num_channels; i++) {
2331 + ch = priv->channel[i];
2332 + napi_enable(&ch->napi);
2333 + }
2334 +}
2335 +
2336 +static void disable_ch_napi(struct dpaa2_eth_priv *priv)
2337 +{
2338 + struct dpaa2_eth_channel *ch;
2339 + int i;
2340 +
2341 + for (i = 0; i < priv->num_channels; i++) {
2342 + ch = priv->channel[i];
2343 + napi_disable(&ch->napi);
2344 + }
2345 +}
2346 +
2347 +static int link_state_update(struct dpaa2_eth_priv *priv)
2348 +{
2349 + struct dpni_link_state state;
2350 + int err;
2351 +
2352 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
2353 + if (unlikely(err)) {
2354 + netdev_err(priv->net_dev,
2355 + "dpni_get_link_state() failed\n");
2356 + return err;
2357 + }
2358 +
2359 +  /* Check link state; speed / duplex changes are not handled yet */
2360 + if (priv->link_state.up == state.up)
2361 + return 0;
2362 +
2363 + priv->link_state = state;
2364 + if (state.up) {
2365 + netif_carrier_on(priv->net_dev);
2366 + netif_tx_start_all_queues(priv->net_dev);
2367 + } else {
2368 + netif_tx_stop_all_queues(priv->net_dev);
2369 + netif_carrier_off(priv->net_dev);
2370 + }
2371 +
2372 +  netdev_info(priv->net_dev, "Link Event: state %s\n",
2373 + state.up ? "up" : "down");
2374 +
2375 + return 0;
2376 +}
2377 +
2378 +static int dpaa2_eth_open(struct net_device *net_dev)
2379 +{
2380 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2381 + int err;
2382 +
2383 + /* We'll only start the txqs when the link is actually ready; make sure
2384 + * we don't race against the link up notification, which may come
2385 + * immediately after dpni_enable();
2386 + */
2387 + netif_tx_stop_all_queues(net_dev);
2388 +
2389 + /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
2390 + * return true and cause 'ip link show' to report the LOWER_UP flag,
2391 + * even though the link notification wasn't even received.
2392 + */
2393 + netif_carrier_off(net_dev);
2394 +
2395 + err = seed_pool(priv, priv->bpid);
2396 + if (err) {
2397 + /* Not much to do; the buffer pool, though not filled up,
2398 + * may still contain some buffers which would enable us
2399 + * to limp on.
2400 + */
2401 + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
2402 + priv->dpbp_dev->obj_desc.id, priv->bpid);
2403 + }
2404 +
2405 + if (priv->tx_pause_frames)
2406 + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
2407 + else
2408 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
2409 +
2410 + err = dpni_enable(priv->mc_io, 0, priv->mc_token);
2411 + if (err < 0) {
2412 + netdev_err(net_dev, "dpni_enable() failed\n");
2413 + goto enable_err;
2414 + }
2415 +
2416 + /* If the DPMAC object has already processed the link up interrupt,
2417 + * we have to learn the link state ourselves.
2418 + */
2419 + err = link_state_update(priv);
2420 + if (err < 0) {
2421 + netdev_err(net_dev, "Can't update link state\n");
2422 + goto link_state_err;
2423 + }
2424 +
2425 + return 0;
2426 +
2427 +link_state_err:
2428 +enable_err:
2429 + priv->refill_thresh = 0;
2430 + drain_pool(priv);
2431 + return err;
2432 +}
2433 +
2434 +static int dpaa2_eth_stop(struct net_device *net_dev)
2435 +{
2436 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2437 + int dpni_enabled;
2438 + int retries = 10, i;
2439 +
2440 + netif_tx_stop_all_queues(net_dev);
2441 + netif_carrier_off(net_dev);
2442 +
2443 + /* Loop while dpni_disable() attempts to drain the egress FQs
2444 + * and confirm them back to us.
2445 + */
2446 + do {
2447 + dpni_disable(priv->mc_io, 0, priv->mc_token);
2448 + dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
2449 + if (dpni_enabled)
2450 + /* Allow the MC some slack */
2451 + msleep(100);
2452 + } while (dpni_enabled && --retries);
2453 + if (!retries) {
2454 + netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
2455 + /* Must go on and disable NAPI nonetheless, so we don't crash at
2456 + * the next "ifconfig up"
2457 + */
2458 + }
2459 +
2460 + priv->refill_thresh = 0;
2461 +
2462 + /* Wait for all running napi poll routines to finish, so that no
2463 + * new refill operations are started.
2464 + */
2465 + for (i = 0; i < priv->num_channels; i++)
2466 + napi_synchronize(&priv->channel[i]->napi);
2467 +
2468 + /* Empty the buffer pool */
2469 + drain_pool(priv);
2470 +
2471 + return 0;
2472 +}
2473 +
2474 +static int dpaa2_eth_init(struct net_device *net_dev)
2475 +{
2476 + u64 supported = 0;
2477 + u64 not_supported = 0;
2478 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2479 + u32 options = priv->dpni_attrs.options;
2480 +
2481 + /* Capabilities listing */
2482 + supported |= IFF_LIVE_ADDR_CHANGE;
2483 +
2484 + if (options & DPNI_OPT_NO_MAC_FILTER)
2485 + not_supported |= IFF_UNICAST_FLT;
2486 + else
2487 + supported |= IFF_UNICAST_FLT;
2488 +
2489 + net_dev->priv_flags |= supported;
2490 + net_dev->priv_flags &= ~not_supported;
2491 +
2492 + /* Features */
2493 + net_dev->features = NETIF_F_RXCSUM |
2494 + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2495 + NETIF_F_SG | NETIF_F_HIGHDMA |
2496 + NETIF_F_LLTX;
2497 + net_dev->hw_features = net_dev->features;
2498 +
2499 + return 0;
2500 +}
2501 +
2502 +static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2503 +{
2504 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2505 + struct device *dev = net_dev->dev.parent;
2506 + int err;
2507 +
2508 + err = eth_mac_addr(net_dev, addr);
2509 + if (err < 0) {
2510 + dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2511 + return err;
2512 + }
2513 +
2514 + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2515 + net_dev->dev_addr);
2516 + if (err) {
2517 + dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2518 + return err;
2519 + }
2520 +
2521 + return 0;
2522 +}
2523 +
2524 +/** Fill in counters maintained by the GPP driver. These may be different from
2525 + * the hardware counters obtained by ethtool.
2526 + */
2527 +static void dpaa2_eth_get_stats(struct net_device *net_dev,
2528 + struct rtnl_link_stats64 *stats)
2529 +{
2530 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2531 + struct rtnl_link_stats64 *percpu_stats;
2532 + u64 *cpustats;
2533 + u64 *netstats = (u64 *)stats;
2534 + int i, j;
2535 + int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2536 +
2537 + for_each_possible_cpu(i) {
2538 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2539 + cpustats = (u64 *)percpu_stats;
2540 + for (j = 0; j < num; j++)
2541 + netstats[j] += cpustats[j];
2542 + }
2543 +}
2544 +
2545 +static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
2546 +{
2547 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2548 + int err;
2549 +
2550 + /* Set the maximum Rx frame length to match the transmit side;
2551 + * account for L2 headers when computing the MFL
2552 + */
2553 + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
2554 + (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
2555 + if (err) {
2556 + netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
2557 + return err;
2558 + }
2559 +
2560 + net_dev->mtu = mtu;
2561 + return 0;
2562 +}
2563 +
2564 +/* Copy mac unicast addresses from @net_dev to @priv.
2565 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2566 + */
2567 +static void add_uc_hw_addr(const struct net_device *net_dev,
2568 + struct dpaa2_eth_priv *priv)
2569 +{
2570 + struct netdev_hw_addr *ha;
2571 + int err;
2572 +
2573 + netdev_for_each_uc_addr(ha, net_dev) {
2574 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2575 + ha->addr);
2576 + if (err)
2577 + netdev_warn(priv->net_dev,
2578 + "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2579 + ha->addr, err);
2580 + }
2581 +}
2582 +
2583 +/* Copy mac multicast addresses from @net_dev to @priv
2584 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2585 + */
2586 +static void add_mc_hw_addr(const struct net_device *net_dev,
2587 + struct dpaa2_eth_priv *priv)
2588 +{
2589 + struct netdev_hw_addr *ha;
2590 + int err;
2591 +
2592 + netdev_for_each_mc_addr(ha, net_dev) {
2593 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2594 + ha->addr);
2595 + if (err)
2596 + netdev_warn(priv->net_dev,
2597 + "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2598 + ha->addr, err);
2599 + }
2600 +}
2601 +
2602 +static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2603 +{
2604 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2605 + int uc_count = netdev_uc_count(net_dev);
2606 + int mc_count = netdev_mc_count(net_dev);
2607 + u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2608 + u32 options = priv->dpni_attrs.options;
2609 + u16 mc_token = priv->mc_token;
2610 + struct fsl_mc_io *mc_io = priv->mc_io;
2611 + int err;
2612 +
2613 + /* Basic sanity checks; these probably indicate a misconfiguration */
2614 + if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2615 + netdev_info(net_dev,
2616 + "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2617 + max_mac);
2618 +
2619 + /* Force promiscuous if the uc or mc counts exceed our capabilities. */
2620 + if (uc_count > max_mac) {
2621 + netdev_info(net_dev,
2622 + "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2623 + uc_count, max_mac);
2624 + goto force_promisc;
2625 + }
2626 + if (mc_count + uc_count > max_mac) {
2627 + netdev_info(net_dev,
2628 + "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2629 + uc_count + mc_count, max_mac);
2630 + goto force_mc_promisc;
2631 + }
2632 +
2633 + /* Adjust promisc settings due to flag combinations */
2634 + if (net_dev->flags & IFF_PROMISC)
2635 + goto force_promisc;
2636 + if (net_dev->flags & IFF_ALLMULTI) {
2637 + /* First, rebuild unicast filtering table. This should be done
2638 + * in promisc mode, in order to avoid frame loss while we
2639 + * progressively add entries to the table.
2640 + * We don't know whether we had been in promisc already, and
2641 + * making an MC call to find out is expensive; so set uc promisc
2642 + * nonetheless.
2643 + */
2644 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2645 + if (err)
2646 + netdev_warn(net_dev, "Can't set uc promisc\n");
2647 +
2648 + /* Actual uc table reconstruction. */
2649 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2650 + if (err)
2651 + netdev_warn(net_dev, "Can't clear uc filters\n");
2652 + add_uc_hw_addr(net_dev, priv);
2653 +
2654 + /* Finally, clear uc promisc and set mc promisc as requested. */
2655 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2656 + if (err)
2657 + netdev_warn(net_dev, "Can't clear uc promisc\n");
2658 + goto force_mc_promisc;
2659 + }
2660 +
2661 +  /* Neither unicast nor multicast promisc will be on... eventually.
2662 + * For now, rebuild mac filtering tables while forcing both of them on.
2663 + */
2664 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2665 + if (err)
2666 + netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2667 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2668 + if (err)
2669 + netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2670 +
2671 + /* Actual mac filtering tables reconstruction */
2672 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2673 + if (err)
2674 + netdev_warn(net_dev, "Can't clear mac filters\n");
2675 + add_mc_hw_addr(net_dev, priv);
2676 + add_uc_hw_addr(net_dev, priv);
2677 +
2678 + /* Now we can clear both ucast and mcast promisc, without risking
2679 + * to drop legitimate frames anymore.
2680 + */
2681 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2682 + if (err)
2683 + netdev_warn(net_dev, "Can't clear ucast promisc\n");
2684 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2685 + if (err)
2686 + netdev_warn(net_dev, "Can't clear mcast promisc\n");
2687 +
2688 + return;
2689 +
2690 +force_promisc:
2691 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2692 + if (err)
2693 + netdev_warn(net_dev, "Can't set ucast promisc\n");
2694 +force_mc_promisc:
2695 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2696 + if (err)
2697 + netdev_warn(net_dev, "Can't set mcast promisc\n");
2698 +}
2699 +
2700 +static int dpaa2_eth_set_features(struct net_device *net_dev,
2701 + netdev_features_t features)
2702 +{
2703 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2704 + netdev_features_t changed = features ^ net_dev->features;
2705 + bool enable;
2706 + int err;
2707 +
2708 + if (changed & NETIF_F_RXCSUM) {
2709 + enable = !!(features & NETIF_F_RXCSUM);
2710 + err = set_rx_csum(priv, enable);
2711 + if (err)
2712 + return err;
2713 + }
2714 +
2715 + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2716 + enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2717 + err = set_tx_csum(priv, enable);
2718 + if (err)
2719 + return err;
2720 + }
2721 +
2722 + return 0;
2723 +}
2724 +
2725 +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2726 +{
2727 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
2728 + struct hwtstamp_config config;
2729 +
2730 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2731 + return -EFAULT;
2732 +
2733 + switch (config.tx_type) {
2734 + case HWTSTAMP_TX_OFF:
2735 + priv->ts_tx_en = false;
2736 + break;
2737 + case HWTSTAMP_TX_ON:
2738 + priv->ts_tx_en = true;
2739 + break;
2740 + default:
2741 + return -ERANGE;
2742 + }
2743 +
2744 + if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2745 + priv->ts_rx_en = false;
2746 + } else {
2747 + priv->ts_rx_en = true;
2748 + /* TS is set for all frame types, not only those requested */
2749 + config.rx_filter = HWTSTAMP_FILTER_ALL;
2750 + }
2751 +
2752 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2753 + -EFAULT : 0;
2754 +}
2755 +
2756 +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2757 +{
2758 + if (cmd == SIOCSHWTSTAMP)
2759 + return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2760 +
2761 + return -EINVAL;
2762 +}
2763 +
2764 +static const struct net_device_ops dpaa2_eth_ops = {
2765 + .ndo_open = dpaa2_eth_open,
2766 + .ndo_start_xmit = dpaa2_eth_tx,
2767 + .ndo_stop = dpaa2_eth_stop,
2768 + .ndo_init = dpaa2_eth_init,
2769 + .ndo_set_mac_address = dpaa2_eth_set_addr,
2770 + .ndo_get_stats64 = dpaa2_eth_get_stats,
2771 + .ndo_change_mtu = dpaa2_eth_change_mtu,
2772 + .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2773 + .ndo_set_features = dpaa2_eth_set_features,
2774 + .ndo_do_ioctl = dpaa2_eth_ioctl,
2775 +};
2776 +
2777 +static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2778 +{
2779 + struct dpaa2_eth_channel *ch;
2780 +
2781 + ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2782 +
2783 + /* Update NAPI statistics */
2784 + ch->stats.cdan++;
2785 +
2786 + napi_schedule_irqoff(&ch->napi);
2787 +}
2788 +
2789 +/* Allocate and configure a DPCON object */
2790 +static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
2791 +{
2792 + struct fsl_mc_device *dpcon;
2793 + struct device *dev = priv->net_dev->dev.parent;
2794 + struct dpcon_attr attrs;
2795 + int err;
2796 +
2797 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2798 + FSL_MC_POOL_DPCON, &dpcon);
2799 + if (err) {
2800 + dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2801 + return NULL;
2802 + }
2803 +
2804 + err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2805 + if (err) {
2806 + dev_err(dev, "dpcon_open() failed\n");
2807 + goto err_open;
2808 + }
2809 +
2810 + err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2811 + if (err) {
2812 + dev_err(dev, "dpcon_reset() failed\n");
2813 + goto err_reset;
2814 + }
2815 +
2816 + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
2817 + if (err) {
2818 + dev_err(dev, "dpcon_get_attributes() failed\n");
2819 + goto err_get_attr;
2820 + }
2821 +
2822 + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2823 + if (err) {
2824 + dev_err(dev, "dpcon_enable() failed\n");
2825 + goto err_enable;
2826 + }
2827 +
2828 + return dpcon;
2829 +
2830 +err_enable:
2831 +err_get_attr:
2832 +err_reset:
2833 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2834 +err_open:
2835 + fsl_mc_object_free(dpcon);
2836 +
2837 + return NULL;
2838 +}
2839 +
2840 +static void free_dpcon(struct dpaa2_eth_priv *priv,
2841 + struct fsl_mc_device *dpcon)
2842 +{
2843 + dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2844 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2845 + fsl_mc_object_free(dpcon);
2846 +}
2847 +
2848 +static struct dpaa2_eth_channel *
2849 +alloc_channel(struct dpaa2_eth_priv *priv)
2850 +{
2851 + struct dpaa2_eth_channel *channel;
2852 + struct dpcon_attr attr;
2853 + struct device *dev = priv->net_dev->dev.parent;
2854 + int err;
2855 +
2856 + channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2857 + if (!channel)
2858 + return NULL;
2859 +
2860 + channel->dpcon = setup_dpcon(priv);
2861 + if (!channel->dpcon)
2862 + goto err_setup;
2863 +
2864 + err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2865 + &attr);
2866 + if (err) {
2867 + dev_err(dev, "dpcon_get_attributes() failed\n");
2868 + goto err_get_attr;
2869 + }
2870 +
2871 + channel->dpcon_id = attr.id;
2872 + channel->ch_id = attr.qbman_ch_id;
2873 + channel->priv = priv;
2874 +
2875 + return channel;
2876 +
2877 +err_get_attr:
2878 + free_dpcon(priv, channel->dpcon);
2879 +err_setup:
2880 + kfree(channel);
2881 + return NULL;
2882 +}
2883 +
2884 +static void free_channel(struct dpaa2_eth_priv *priv,
2885 + struct dpaa2_eth_channel *channel)
2886 +{
2887 + free_dpcon(priv, channel->dpcon);
2888 + kfree(channel);
2889 +}
2890 +
2891 +/* DPIO setup: allocate and configure QBMan channels, setup core affinity
2892 + * and register data availability notifications
2893 + */
2894 +static int setup_dpio(struct dpaa2_eth_priv *priv)
2895 +{
2896 + struct dpaa2_io_notification_ctx *nctx;
2897 + struct dpaa2_eth_channel *channel;
2898 + struct dpcon_notification_cfg dpcon_notif_cfg;
2899 + struct device *dev = priv->net_dev->dev.parent;
2900 + int i, err;
2901 +
2902 + /* We want the ability to spread ingress traffic (RX, TX conf) to as
2903 + * many cores as possible, so we need one channel for each core
2904 +   * (unless there are fewer queues than cores, in which case the extra
2905 + * channels would be wasted).
2906 + * Allocate one channel per core and register it to the core's
2907 + * affine DPIO. If not enough channels are available for all cores
2908 + * or if some cores don't have an affine DPIO, there will be no
2909 + * ingress frame processing on those cores.
2910 + */
2911 + cpumask_clear(&priv->dpio_cpumask);
2912 + for_each_online_cpu(i) {
2913 + /* Try to allocate a channel */
2914 + channel = alloc_channel(priv);
2915 + if (!channel) {
2916 + dev_info(dev,
2917 + "No affine channel for cpu %d and above\n", i);
2918 + goto err_alloc_ch;
2919 + }
2920 +
2921 + priv->channel[priv->num_channels] = channel;
2922 +
2923 + nctx = &channel->nctx;
2924 + nctx->is_cdan = 1;
2925 + nctx->cb = cdan_cb;
2926 + nctx->id = channel->ch_id;
2927 + nctx->desired_cpu = i;
2928 +
2929 + /* Register the new context */
2930 + err = dpaa2_io_service_register(NULL, nctx);
2931 + if (err) {
2932 + dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2933 + /* If no affine DPIO for this core, there's probably
2934 + * none available for next cores either.
2935 + */
2936 + goto err_service_reg;
2937 + }
2938 +
2939 + /* Register DPCON notification with MC */
2940 + dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2941 + dpcon_notif_cfg.priority = 0;
2942 + dpcon_notif_cfg.user_ctx = nctx->qman64;
2943 + err = dpcon_set_notification(priv->mc_io, 0,
2944 + channel->dpcon->mc_handle,
2945 + &dpcon_notif_cfg);
2946 + if (err) {
2947 +                  dev_err(dev, "dpcon_set_notification() failed\n");
2948 + goto err_set_cdan;
2949 + }
2950 +
2951 + /* If we managed to allocate a channel and also found an affine
2952 + * DPIO for this core, add it to the final mask
2953 + */
2954 + cpumask_set_cpu(i, &priv->dpio_cpumask);
2955 + priv->num_channels++;
2956 +
2957 + /* Stop if we already have enough channels to accommodate all
2958 + * RX and TX conf queues
2959 + */
2960 + if (priv->num_channels == dpaa2_eth_queue_count(priv))
2961 + break;
2962 + }
2963 +
2964 + /* Tx confirmation queues can only be serviced by cpus
2965 + * with an affine DPIO/channel
2966 + */
2967 + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
2968 +
2969 + return 0;
2970 +
2971 +err_set_cdan:
2972 + dpaa2_io_service_deregister(NULL, nctx);
2973 +err_service_reg:
2974 + free_channel(priv, channel);
2975 +err_alloc_ch:
2976 + if (cpumask_empty(&priv->dpio_cpumask)) {
2977 + dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n");
2978 + return -ENODEV;
2979 + }
2980 + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
2981 +
2982 + dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2983 + cpumask_pr_args(&priv->dpio_cpumask));
2984 +
2985 + return 0;
2986 +}
2987 +
2988 +static void free_dpio(struct dpaa2_eth_priv *priv)
2989 +{
2990 + int i;
2991 + struct dpaa2_eth_channel *ch;
2992 +
2993 + /* deregister CDAN notifications and free channels */
2994 + for (i = 0; i < priv->num_channels; i++) {
2995 + ch = priv->channel[i];
2996 + dpaa2_io_service_deregister(NULL, &ch->nctx);
2997 + free_channel(priv, ch);
2998 + }
2999 +}
3000 +
3001 +static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
3002 + int cpu)
3003 +{
3004 + struct device *dev = priv->net_dev->dev.parent;
3005 + int i;
3006 +
3007 + for (i = 0; i < priv->num_channels; i++)
3008 + if (priv->channel[i]->nctx.desired_cpu == cpu)
3009 + return priv->channel[i];
3010 +
3011 + /* We should never get here. Issue a warning and return
3012 + * the first channel, because it's still better than nothing
3013 + */
3014 + dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3015 +
3016 + return priv->channel[0];
3017 +}
3018 +
3019 +static void set_fq_affinity(struct dpaa2_eth_priv *priv)
3020 +{
3021 + struct device *dev = priv->net_dev->dev.parent;
3022 + struct cpumask xps_mask = CPU_MASK_NONE;
3023 + struct dpaa2_eth_fq *fq;
3024 + int rx_cpu, txc_cpu;
3025 + int i, err;
3026 +
3027 + /* For each FQ, pick one channel/CPU to deliver frames to.
3028 + * This may well change at runtime, either through irqbalance or
3029 + * through direct user intervention.
3030 + */
3031 + rx_cpu = cpumask_first(&priv->dpio_cpumask);
3032 + txc_cpu = cpumask_first(&priv->txconf_cpumask);
3033 +
3034 + for (i = 0; i < priv->num_fqs; i++) {
3035 + fq = &priv->fq[i];
3036 + switch (fq->type) {
3037 + case DPAA2_RX_FQ:
3038 + case DPAA2_RX_ERR_FQ:
3039 + fq->target_cpu = rx_cpu;
3040 + rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3041 + if (rx_cpu >= nr_cpu_ids)
3042 + rx_cpu = cpumask_first(&priv->dpio_cpumask);
3043 + break;
3044 + case DPAA2_TX_CONF_FQ:
3045 + fq->target_cpu = txc_cpu;
3046 +
3047 + /* register txc_cpu to XPS */
3048 + cpumask_set_cpu(txc_cpu, &xps_mask);
3049 + err = netif_set_xps_queue(priv->net_dev, &xps_mask,
3050 + fq->flowid);
3051 + if (err)
3052 + dev_info_once(dev,
3053 + "Tx: error setting XPS queue\n");
3054 + cpumask_clear_cpu(txc_cpu, &xps_mask);
3055 +
3056 + txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask);
3057 + if (txc_cpu >= nr_cpu_ids)
3058 + txc_cpu = cpumask_first(&priv->txconf_cpumask);
3059 + break;
3060 + default:
3061 + dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3062 + }
3063 + fq->channel = get_affine_channel(priv, fq->target_cpu);
3064 + }
3065 +}
3066 +
3067 +static void setup_fqs(struct dpaa2_eth_priv *priv)
3068 +{
3069 + int i;
3070 +
3071 + /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the
3072 + * beginning of the queue array.
3073 + * Number of Rx and Tx queues are the same.
3074 + * We only support one traffic class for now.
3075 + */
3076 + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3077 + priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3078 + priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3079 + priv->fq[priv->num_fqs++].flowid = (u16)i;
3080 + }
3081 +
3082 + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3083 + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3084 + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3085 + priv->fq[priv->num_fqs++].flowid = (u16)i;
3086 + }
3087 +
3088 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3089 + /* We have exactly one Rx error queue per DPNI */
3090 + priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3091 + priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3092 +#endif
3093 +
3094 + /* For each FQ, decide on which core to process incoming frames */
3095 + set_fq_affinity(priv);
3096 +}
3097 +
3098 +/* Allocate and configure one buffer pool for each interface */
3099 +static int setup_dpbp(struct dpaa2_eth_priv *priv)
3100 +{
3101 + int err;
3102 + struct fsl_mc_device *dpbp_dev;
3103 + struct dpbp_attr dpbp_attrs;
3104 + struct device *dev = priv->net_dev->dev.parent;
3105 +
3106 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3107 + &dpbp_dev);
3108 + if (err) {
3109 + dev_err(dev, "DPBP device allocation failed\n");
3110 + return err;
3111 + }
3112 +
3113 + priv->dpbp_dev = dpbp_dev;
3114 +
3115 + err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
3116 + &dpbp_dev->mc_handle);
3117 + if (err) {
3118 + dev_err(dev, "dpbp_open() failed\n");
3119 + goto err_open;
3120 + }
3121 +
3122 + err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3123 + if (err) {
3124 + dev_err(dev, "dpbp_reset() failed\n");
3125 + goto err_reset;
3126 + }
3127 +
3128 + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3129 + if (err) {
3130 + dev_err(dev, "dpbp_enable() failed\n");
3131 + goto err_enable;
3132 + }
3133 +
3134 + err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3135 + &dpbp_attrs);
3136 + if (err) {
3137 + dev_err(dev, "dpbp_get_attributes() failed\n");
3138 + goto err_get_attr;
3139 + }
3140 +
3141 + priv->bpid = dpbp_attrs.bpid;
3142 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
3143 +
3144 + return 0;
3145 +
3146 +err_get_attr:
3147 + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3148 +err_enable:
3149 +err_reset:
3150 + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3151 +err_open:
3152 + fsl_mc_object_free(dpbp_dev);
3153 +
3154 + return err;
3155 +}
3156 +
3157 +static void free_dpbp(struct dpaa2_eth_priv *priv)
3158 +{
3159 + drain_pool(priv);
3160 + dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3161 + dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3162 + fsl_mc_object_free(priv->dpbp_dev);
3163 +}
3164 +
3165 +static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
3166 +{
3167 + struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 };
3168 + struct device *dev = priv->net_dev->dev.parent;
3169 + int err;
3170 +
3171 + priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
3172 + GFP_KERNEL);
3173 + if (!priv->cscn_unaligned)
3174 + return -ENOMEM;
3175 +
3176 + priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
3177 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
3178 + DMA_FROM_DEVICE);
3179 + if (dma_mapping_error(dev, priv->cscn_dma)) {
3180 + dev_err(dev, "Error mapping CSCN memory area\n");
3181 + err = -ENOMEM;
3182 + goto err_dma_map;
3183 + }
3184 +
3185 + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
3186 + cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
3187 + cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
3188 + cong_notif_cfg.message_ctx = (u64)priv;
3189 + cong_notif_cfg.message_iova = priv->cscn_dma;
3190 + cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
3191 + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
3192 + DPNI_CONG_OPT_COHERENT_WRITE;
3193 + err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
3194 + DPNI_QUEUE_TX, 0,
3195 + &cong_notif_cfg);
3196 + if (err) {
3197 + dev_err(dev, "dpni_set_congestion_notification failed\n");
3198 + goto err_set_cong;
3199 + }
3200 +
3201 + return 0;
3202 +
3203 +err_set_cong:
3204 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3205 +err_dma_map:
3206 + kfree(priv->cscn_unaligned);
3207 +
3208 + return err;
3209 +}
3210 +
3211 +/* Configure the DPNI object this interface is associated with */
3212 +static int setup_dpni(struct fsl_mc_device *ls_dev)
3213 +{
3214 + struct device *dev = &ls_dev->dev;
3215 + struct dpaa2_eth_priv *priv;
3216 + struct net_device *net_dev;
3217 + struct dpni_buffer_layout buf_layout;
3218 + struct dpni_link_cfg cfg = {0};
3219 + int err;
3220 +
3221 + net_dev = dev_get_drvdata(dev);
3222 + priv = netdev_priv(net_dev);
3223 +
3224 + priv->dpni_id = ls_dev->obj_desc.id;
3225 +
3226 + /* get a handle for the DPNI object */
3227 + err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
3228 + if (err) {
3229 + dev_err(dev, "dpni_open() failed\n");
3230 + goto err_open;
3231 + }
3232 +
3233 + ls_dev->mc_io = priv->mc_io;
3234 + ls_dev->mc_handle = priv->mc_token;
3235 +
3236 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3237 + if (err) {
3238 + dev_err(dev, "dpni_reset() failed\n");
3239 + goto err_reset;
3240 + }
3241 +
3242 + err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3243 + &priv->dpni_attrs);
3244 +
3245 + if (err) {
3246 + dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3247 + goto err_get_attr;
3248 + }
3249 +
3250 + /* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf
3251 + * align value must be a multiple of 256.
3252 + */
3253 + priv->rx_buf_align =
3254 + priv->dpni_attrs.wriop_version & 0x3ff ?
3255 + DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1;
3256 +
3257 + /* Update number of logical FQs in netdev */
3258 + err = netif_set_real_num_tx_queues(net_dev,
3259 + dpaa2_eth_queue_count(priv));
3260 + if (err) {
3261 + dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err);
3262 + goto err_set_tx_queues;
3263 + }
3264 +
3265 + err = netif_set_real_num_rx_queues(net_dev,
3266 + dpaa2_eth_queue_count(priv));
3267 + if (err) {
3268 + dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err);
3269 + goto err_set_rx_queues;
3270 + }
3271 +
3272 + /* Configure buffer layouts */
3273 + /* rx buffer */
3274 + buf_layout.pass_parser_result = true;
3275 + buf_layout.pass_frame_status = true;
3276 + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3277 + buf_layout.data_align = priv->rx_buf_align;
3278 + buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM;
3279 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3280 + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3281 + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3282 + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3283 + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
3284 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3285 + DPNI_QUEUE_RX, &buf_layout);
3286 + if (err) {
3287 +		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3289 + goto err_buf_layout;
3290 + }
3291 +
3292 + /* tx buffer */
3293 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3294 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3295 + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
3296 + buf_layout.pass_timestamp = true;
3297 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3298 + DPNI_QUEUE_TX, &buf_layout);
3299 + if (err) {
3300 +		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3302 + goto err_buf_layout;
3303 + }
3304 +
3305 + /* tx-confirm buffer */
3306 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3307 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3308 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3309 + DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3310 + if (err) {
3311 + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3312 + goto err_buf_layout;
3313 + }
3314 +
3315 + /* Now that we've set our tx buffer layout, retrieve the minimum
3316 + * required tx data offset.
3317 + */
3318 + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3319 + &priv->tx_data_offset);
3320 + if (err) {
3321 + dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err);
3322 + goto err_data_offset;
3323 + }
3324 +
3325 + if ((priv->tx_data_offset % 64) != 0)
3326 + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
3327 + priv->tx_data_offset);
3328 +
3329 + /* Accommodate software annotation space (SWA) */
3330 + priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
3331 +
3332 + /* Enable congestion notifications for Tx queues */
3333 + err = setup_tx_congestion(priv);
3334 + if (err)
3335 + goto err_tx_cong;
3336 +
3337 + /* allocate classification rule space */
3338 + priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
3339 + dpaa2_eth_fs_count(priv), GFP_KERNEL);
3340 +	if (!priv->cls_rule) {
 +		err = -ENOMEM;
3341 +		goto err_cls_rule;
 +	}
3342 +
3343 + /* Enable flow control */
3344 + cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
3345 + priv->tx_pause_frames = 1;
3346 +
3347 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
3348 + if (err) {
3349 + netdev_err(net_dev, "ERROR %d setting link cfg", err);
3350 + goto err_set_link_cfg;
3351 + }
3352 +
3353 + return 0;
3354 +
3355 +err_set_link_cfg:
 +	kfree(priv->cls_rule);
3356 +err_cls_rule:
 +	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
 +	kfree(priv->cscn_unaligned);
3357 +err_tx_cong:
3358 +err_data_offset:
3359 +err_buf_layout:
3360 +err_set_rx_queues:
3361 +err_set_tx_queues:
3362 +err_get_attr:
3363 +err_reset:
3364 + dpni_close(priv->mc_io, 0, priv->mc_token);
3365 +err_open:
3366 + return err;
3367 +}
3368 +
3369 +static void free_dpni(struct dpaa2_eth_priv *priv)
3370 +{
3371 + struct device *dev = priv->net_dev->dev.parent;
3372 + int err;
3373 +
3374 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3375 + if (err)
3376 + netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3377 + err);
3378 +
3379 + dpni_close(priv->mc_io, 0, priv->mc_token);
3380 +
3381 + kfree(priv->cls_rule);
3382 +
3383 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3384 + kfree(priv->cscn_unaligned);
3385 +}
3386 +
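+/* Enable or disable taildrop on the Rx queues and pick the matching
+ * buffer quota and refill threshold. Taildrop is used while Tx pause
+ * frames are disabled (see dpaa2_eth_set_pauseparam()).
+ */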
3387 +int setup_fqs_taildrop(struct dpaa2_eth_priv *priv,
3388 + bool enable)
3389 +{
3390 + struct device *dev = priv->net_dev->dev.parent;
3391 + struct dpni_taildrop td;
3392 + int err = 0, i;
3393 +
3394 + td.enable = enable;
3395 + td.threshold = DPAA2_ETH_TAILDROP_THRESH;
3396 +
3397 + if (enable) {
3398 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
3399 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
3400 + } else {
3401 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
3402 + priv->num_channels;
3403 + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
3404 + }
3405 +
3406 + for (i = 0; i < priv->num_fqs; i++) {
3407 + if (priv->fq[i].type != DPAA2_RX_FQ)
3408 + continue;
3409 +
3410 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
3411 + DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
3412 + priv->fq[i].flowid, &td);
3413 + if (err) {
3414 + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
3415 + break;
3416 + }
3417 + }
3418 +
3419 + return err;
3420 +}
3421 +
3422 +static int setup_rx_flow(struct dpaa2_eth_priv *priv,
3423 + struct dpaa2_eth_fq *fq)
3424 +{
3425 + struct device *dev = priv->net_dev->dev.parent;
3426 + struct dpni_queue q = { { 0 } };
3427 + struct dpni_queue_id qid;
3428 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3429 + int err;
3430 +
3431 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3432 + DPNI_QUEUE_RX, 0, fq->flowid, &q, &qid);
3433 + if (err) {
3434 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3435 + return err;
3436 + }
3437 +
3438 + fq->fqid = qid.fqid;
3439 +
3440 + q.destination.id = fq->channel->dpcon_id;
3441 + q.destination.type = DPNI_DEST_DPCON;
3442 + q.destination.priority = 1;
3443 + q.user_context = (u64)fq;
3444 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3445 + DPNI_QUEUE_RX, 0, fq->flowid, q_opt, &q);
3446 + if (err) {
3447 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3448 + return err;
3449 + }
3450 +
3451 + return 0;
3452 +}
3453 +
3454 +static int setup_tx_flow(struct dpaa2_eth_priv *priv,
3455 + struct dpaa2_eth_fq *fq)
3456 +{
3457 + struct device *dev = priv->net_dev->dev.parent;
3458 + struct dpni_queue q = { { 0 } };
3459 + struct dpni_queue_id qid;
3460 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3461 + int err;
3462 +
3463 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3464 + DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid);
3465 + if (err) {
3466 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3467 + return err;
3468 + }
3469 +
3470 + fq->tx_qdbin = qid.qdbin;
3471 +
3472 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3473 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid);
3474 + if (err) {
3475 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3476 + return err;
3477 + }
3478 +
3479 + fq->fqid = qid.fqid;
3480 +
3481 + q.destination.id = fq->channel->dpcon_id;
3482 + q.destination.type = DPNI_DEST_DPCON;
3483 + q.destination.priority = 0;
3484 + q.user_context = (u64)fq;
3485 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3486 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q);
3487 + if (err) {
3488 +		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3489 + return err;
3490 + }
3491 +
3492 + return 0;
3493 +}
3494 +
3495 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3496 +static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3497 + struct dpaa2_eth_fq *fq)
3498 +{
3499 + struct device *dev = priv->net_dev->dev.parent;
3500 + struct dpni_queue q = { { 0 } };
3501 + struct dpni_queue_id qid;
3502 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3503 + int err;
3504 +
3505 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3506 + DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3507 + if (err) {
3508 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3509 + return err;
3510 + }
3511 +
3512 + fq->fqid = qid.fqid;
3513 +
3514 + q.destination.id = fq->channel->dpcon_id;
3515 + q.destination.type = DPNI_DEST_DPCON;
3516 + q.destination.priority = 1;
3517 + q.user_context = (u64)fq;
3518 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3519 + DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3520 + if (err) {
3521 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3522 + return err;
3523 + }
3524 +
3525 + return 0;
3526 +}
3527 +#endif
3528 +
3529 +/* default hash key fields */
3530 +static struct dpaa2_eth_hash_fields default_hash_fields[] = {
3531 + {
3532 + /* L2 header */
3533 + .rxnfc_field = RXH_L2DA,
3534 + .cls_prot = NET_PROT_ETH,
3535 + .cls_field = NH_FLD_ETH_DA,
3536 + .size = 6,
3537 + }, {
3538 + .cls_prot = NET_PROT_ETH,
3539 + .cls_field = NH_FLD_ETH_SA,
3540 + .size = 6,
3541 + }, {
3542 + /* This is the last ethertype field parsed:
3543 + * depending on frame format, it can be the MAC ethertype
3544 + * or the VLAN etype.
3545 + */
3546 + .cls_prot = NET_PROT_ETH,
3547 + .cls_field = NH_FLD_ETH_TYPE,
3548 + .size = 2,
3549 + }, {
3550 + /* VLAN header */
3551 + .rxnfc_field = RXH_VLAN,
3552 + .cls_prot = NET_PROT_VLAN,
3553 + .cls_field = NH_FLD_VLAN_TCI,
3554 + .size = 2,
3555 + }, {
3556 + /* IP header */
3557 + .rxnfc_field = RXH_IP_SRC,
3558 + .cls_prot = NET_PROT_IP,
3559 + .cls_field = NH_FLD_IP_SRC,
3560 + .size = 4,
3561 + }, {
3562 + .rxnfc_field = RXH_IP_DST,
3563 + .cls_prot = NET_PROT_IP,
3564 + .cls_field = NH_FLD_IP_DST,
3565 + .size = 4,
3566 + }, {
3567 + .rxnfc_field = RXH_L3_PROTO,
3568 + .cls_prot = NET_PROT_IP,
3569 + .cls_field = NH_FLD_IP_PROTO,
3570 + .size = 1,
3571 + }, {
3572 + /* Using UDP ports, this is functionally equivalent to raw
3573 + * byte pairs from L4 header.
3574 + */
3575 + .rxnfc_field = RXH_L4_B_0_1,
3576 + .cls_prot = NET_PROT_UDP,
3577 + .cls_field = NH_FLD_UDP_PORT_SRC,
3578 + .size = 2,
3579 + }, {
3580 + .rxnfc_field = RXH_L4_B_2_3,
3581 + .cls_prot = NET_PROT_UDP,
3582 + .cls_field = NH_FLD_UDP_PORT_DST,
3583 + .size = 2,
3584 + },
3585 +};
3586 +
3587 +/* Set RX hash options */
3588 +static int set_hash(struct dpaa2_eth_priv *priv)
3589 +{
3590 + struct device *dev = priv->net_dev->dev.parent;
3591 + struct dpkg_profile_cfg cls_cfg;
3592 + struct dpni_rx_tc_dist_cfg dist_cfg;
3593 + u8 *dma_mem;
3594 + int i;
3595 + int err = 0;
3596 +
3597 + memset(&cls_cfg, 0, sizeof(cls_cfg));
3598 +
3599 + for (i = 0; i < priv->num_hash_fields; i++) {
3600 + struct dpkg_extract *key =
3601 + &cls_cfg.extracts[cls_cfg.num_extracts];
3602 +
3603 + key->type = DPKG_EXTRACT_FROM_HDR;
3604 + key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot;
3605 + key->extract.from_hdr.type = DPKG_FULL_FIELD;
3606 + key->extract.from_hdr.field = priv->hash_fields[i].cls_field;
3607 + cls_cfg.num_extracts++;
3608 +
3609 + priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field;
3610 + }
3611 +
3612 + dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
3613 + if (!dma_mem)
3614 + return -ENOMEM;
3615 +
3616 + err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3617 + if (err) {
3618 + dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err);
3619 + goto err_prep_key;
3620 + }
3621 +
3622 + memset(&dist_cfg, 0, sizeof(dist_cfg));
3623 +
3624 + /* Prepare for setting the rx dist */
3625 + dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3626 + DPAA2_CLASSIFIER_DMA_SIZE,
3627 + DMA_TO_DEVICE);
3628 + if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
3629 + dev_err(dev, "DMA mapping failed\n");
3630 + err = -ENOMEM;
3631 + goto err_dma_map;
3632 + }
3633 +
3634 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3635 + if (dpaa2_eth_fs_enabled(priv)) {
3636 + dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
3637 + dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
3638 + } else {
3639 + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3640 + }
3641 +
3642 + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
3643 + dma_unmap_single(dev, dist_cfg.key_cfg_iova,
3644 + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
3645 + if (err)
3646 + dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err);
3647 +
3648 +err_dma_map:
3649 +err_prep_key:
3650 + kfree(dma_mem);
3651 + return err;
3652 +}
3653 +
3654 +/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3655 + * frame queues and channels
3656 + */
3657 +static int bind_dpni(struct dpaa2_eth_priv *priv)
3658 +{
3659 + struct net_device *net_dev = priv->net_dev;
3660 + struct device *dev = net_dev->dev.parent;
3661 + struct dpni_pools_cfg pools_params;
3662 + struct dpni_error_cfg err_cfg;
3663 + int err = 0;
3664 + int i;
3665 +
3666 + pools_params.num_dpbp = 1;
3667 + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3668 + pools_params.pools[0].backup_pool = 0;
3669 + pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
3670 + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3671 + if (err) {
3672 + dev_err(dev, "dpni_set_pools() failed\n");
3673 + return err;
3674 + }
3675 +
3676 + /* Verify classification options and disable hashing and/or
3677 + * flow steering support in case of invalid configuration values
3678 + */
3679 + priv->hash_fields = default_hash_fields;
3680 + priv->num_hash_fields = ARRAY_SIZE(default_hash_fields);
3681 + check_cls_support(priv);
3682 +
3683 + /* have the interface implicitly distribute traffic based on
3684 + * a static hash key
3685 + */
3686 + if (dpaa2_eth_hash_enabled(priv)) {
3687 + err = set_hash(priv);
3688 + if (err) {
3689 + dev_err(dev, "Hashing configuration failed\n");
3690 + return err;
3691 + }
3692 + }
3693 +
3694 + /* Configure handling of error frames */
3695 + err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3696 + err_cfg.set_frame_annotation = 1;
3697 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3698 + err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
3699 +#else
3700 + err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3701 +#endif
3702 + err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3703 + &err_cfg);
3704 + if (err) {
3705 + dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err);
3706 + return err;
3707 + }
3708 +
3709 + /* Configure Rx and Tx conf queues to generate CDANs */
3710 + for (i = 0; i < priv->num_fqs; i++) {
3711 + switch (priv->fq[i].type) {
3712 + case DPAA2_RX_FQ:
3713 + err = setup_rx_flow(priv, &priv->fq[i]);
3714 + break;
3715 + case DPAA2_TX_CONF_FQ:
3716 + err = setup_tx_flow(priv, &priv->fq[i]);
3717 + break;
3718 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3719 + case DPAA2_RX_ERR_FQ:
3720 + err = setup_rx_err_flow(priv, &priv->fq[i]);
3721 + break;
3722 +#endif
3723 + default:
3724 + dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3725 + return -EINVAL;
3726 + }
3727 + if (err)
3728 + return err;
3729 + }
3730 +
3731 + err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX,
3732 + &priv->tx_qdid);
3733 + if (err) {
3734 + dev_err(dev, "dpni_get_qdid() failed\n");
3735 + return err;
3736 + }
3737 +
3738 + return 0;
3739 +}
3740 +
3741 +/* Allocate rings for storing incoming frame descriptors */
3742 +static int alloc_rings(struct dpaa2_eth_priv *priv)
3743 +{
3744 + struct net_device *net_dev = priv->net_dev;
3745 + struct device *dev = net_dev->dev.parent;
3746 + int i;
3747 +
3748 + for (i = 0; i < priv->num_channels; i++) {
3749 + priv->channel[i]->store =
3750 + dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3751 + if (!priv->channel[i]->store) {
3752 + netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3753 + goto err_ring;
3754 + }
3755 + }
3756 +
3757 + return 0;
3758 +
3759 +err_ring:
3760 + for (i = 0; i < priv->num_channels; i++) {
3761 + if (!priv->channel[i]->store)
3762 + break;
3763 + dpaa2_io_store_destroy(priv->channel[i]->store);
3764 + }
3765 +
3766 + return -ENOMEM;
3767 +}
3768 +
3769 +static void free_rings(struct dpaa2_eth_priv *priv)
3770 +{
3771 + int i;
3772 +
3773 + for (i = 0; i < priv->num_channels; i++)
3774 + dpaa2_io_store_destroy(priv->channel[i]->store);
3775 +}
3776 +
3777 +static int netdev_init(struct net_device *net_dev)
3778 +{
3779 + int err;
3780 + struct device *dev = net_dev->dev.parent;
3781 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3782 + u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3783 + u8 bcast_addr[ETH_ALEN];
3784 + u16 rx_headroom, rx_req_headroom;
3785 +
3786 + net_dev->netdev_ops = &dpaa2_eth_ops;
3787 +
3788 + /* Get firmware address, if any */
3789 + err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3790 + if (err) {
3791 + dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err);
3792 + return err;
3793 + }
3794 +
3795 +	/* Get the MAC address currently configured on the DPNI, if any */
3796 + err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3797 + dpni_mac_addr);
3798 + if (err) {
3799 + dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
3800 + return err;
3801 + }
3802 +
3803 + /* First check if firmware has any address configured by bootloader */
3804 + if (!is_zero_ether_addr(mac_addr)) {
3805 + /* If the DPMAC addr != the DPNI addr, update it */
3806 + if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3807 + err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3808 + priv->mc_token,
3809 + mac_addr);
3810 + if (err) {
3811 + dev_err(dev,
3812 + "dpni_set_primary_mac_addr() failed (%d)\n",
3813 + err);
3814 + return err;
3815 + }
3816 + }
3817 + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3818 + } else if (is_zero_ether_addr(dpni_mac_addr)) {
3819 + /* Fills in net_dev->dev_addr, as required by
3820 + * register_netdevice()
3821 + */
3822 + eth_hw_addr_random(net_dev);
3823 + /* Make the user aware, without cluttering the boot log */
3824 +		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
3825 + err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3826 + priv->mc_token, net_dev->dev_addr);
3827 + if (err) {
3828 + dev_err(dev,
3829 + "dpni_set_primary_mac_addr() failed (%d)\n", err);
3830 + return err;
3831 + }
3832 + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
3833 + * practical purposes, this will be our "permanent" mac address,
3834 + * at least until the next reboot. This move will also permit
3835 + * register_netdevice() to properly fill up net_dev->perm_addr.
3836 + */
3837 + net_dev->addr_assign_type = NET_ADDR_PERM;
3838 + /* If DPMAC address is non-zero, use that one */
3839 + } else {
3840 + /* NET_ADDR_PERM is default, all we have to do is
3841 + * fill in the device addr.
3842 + */
3843 + memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
3844 + }
3845 +
3846 + /* Explicitly add the broadcast address to the MAC filtering table;
3847 + * the MC won't do that for us.
3848 + */
3849 + eth_broadcast_addr(bcast_addr);
3850 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
3851 + if (err) {
3852 + dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
3853 + /* Won't return an error; at least, we'd have egress traffic */
3854 + }
3855 +
3856 + /* Reserve enough space to align buffer as per hardware requirement;
3857 + * NOTE: priv->tx_data_offset MUST be initialized at this point.
3858 + */
3859 + net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
3860 +
3861 + /* Set MTU limits */
3862 + net_dev->min_mtu = 68;
3863 + net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
3864 +
3865 + /* Required headroom for Rx skbs, to avoid reallocation on
3866 + * forwarding path.
3867 + */
3868 + rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
3869 + rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE +
3870 + DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align);
3871 + if (rx_req_headroom > rx_headroom)
3872 + dev_info_once(dev,
3873 + "Required headroom (%d) greater than available (%d).\n"
3874 + "This will impact performance due to reallocations.\n",
3875 + rx_req_headroom, rx_headroom);
3876 +
3877 +	/* Our .ndo_init will be called as part of register_netdev() */
3878 + err = register_netdev(net_dev);
3879 + if (err < 0) {
3880 + dev_err(dev, "register_netdev() failed (%d)\n", err);
3881 + return err;
3882 + }
3883 +
3884 + return 0;
3885 +}
3886 +
3887 +static int poll_link_state(void *arg)
3888 +{
3889 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
3890 + int err;
3891 +
3892 + while (!kthread_should_stop()) {
3893 + err = link_state_update(priv);
3894 + if (unlikely(err))
3895 + return err;
3896 +
3897 + msleep(DPAA2_ETH_LINK_STATE_REFRESH);
3898 + }
3899 +
3900 + return 0;
3901 +}
3902 +
3903 +static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
3904 +{
3905 + return IRQ_WAKE_THREAD;
3906 +}
3907 +
3908 +static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3909 +{
3910 + u32 status = 0, clear = 0;
3911 + struct device *dev = (struct device *)arg;
3912 + struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3913 + struct net_device *net_dev = dev_get_drvdata(dev);
3914 + int err;
3915 +
3916 + err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3917 + DPNI_IRQ_INDEX, &status);
3918 + if (unlikely(err)) {
3919 + netdev_err(net_dev, "Can't get irq status (err %d)", err);
3920 + clear = 0xffffffff;
3921 + goto out;
3922 + }
3923 +
3924 + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
3925 + clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
3926 + link_state_update(netdev_priv(net_dev));
3927 + }
3928 +
3929 +out:
3930 + dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3931 + DPNI_IRQ_INDEX, clear);
3932 + return IRQ_HANDLED;
3933 +}
3934 +
3935 +static int setup_irqs(struct fsl_mc_device *ls_dev)
3936 +{
3937 + int err = 0;
3938 + struct fsl_mc_device_irq *irq;
3939 +
3940 + err = fsl_mc_allocate_irqs(ls_dev);
3941 + if (err) {
3942 + dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
3943 + return err;
3944 + }
3945 +
3946 + irq = ls_dev->irqs[0];
3947 + err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
3948 + dpni_irq0_handler,
3949 + dpni_irq0_handler_thread,
3950 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
3951 + dev_name(&ls_dev->dev), &ls_dev->dev);
3952 + if (err < 0) {
3953 + dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
3954 + goto free_mc_irq;
3955 + }
3956 +
3957 + err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
3958 + DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
3959 + if (err < 0) {
3960 + dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
3961 + goto free_irq;
3962 + }
3963 +
3964 + err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
3965 + DPNI_IRQ_INDEX, 1);
3966 + if (err < 0) {
3967 + dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
3968 + goto free_irq;
3969 + }
3970 +
3971 + return 0;
3972 +
3973 +free_irq:
3974 + devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
3975 +free_mc_irq:
3976 + fsl_mc_free_irqs(ls_dev);
3977 +
3978 + return err;
3979 +}
3980 +
3981 +static void add_ch_napi(struct dpaa2_eth_priv *priv)
3982 +{
3983 + int i;
3984 + struct dpaa2_eth_channel *ch;
3985 +
3986 + for (i = 0; i < priv->num_channels; i++) {
3987 + ch = priv->channel[i];
3988 + /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
3989 + netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
3990 + NAPI_POLL_WEIGHT);
3991 + }
3992 +}
3993 +
3994 +static void del_ch_napi(struct dpaa2_eth_priv *priv)
3995 +{
3996 + int i;
3997 + struct dpaa2_eth_channel *ch;
3998 +
3999 + for (i = 0; i < priv->num_channels; i++) {
4000 + ch = priv->channel[i];
4001 + netif_napi_del(&ch->napi);
4002 + }
4003 +}
4004 +
4005 +/* SysFS support */
4006 +static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
4007 + struct device_attribute *attr,
4008 + char *buf)
4009 +{
4010 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4011 + /* No MC API for getting the shaping config. We're stateful. */
4012 + struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
4013 +
4014 + return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
4015 +}
4016 +
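+/* Example (values are illustrative only): writing "1000 8000" to
+ * /sys/class/net/ni0/tx_shaping requests a 1000 Mbps rate limit with a
+ * maximum burst size of 8000 bytes.
+ */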
4017 +static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
4018 + struct device_attribute *attr,
4019 + const char *buf,
4020 + size_t count)
4021 +{
4022 + int err, items;
4023 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4024 + struct dpni_tx_shaping_cfg scfg;
4025 +
4026 + items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
4027 + if (items != 2) {
4028 + pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
4029 + return -EINVAL;
4030 + }
4031 + /* Size restriction as per MC API documentation */
4032 + if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
4033 + pr_err("max_burst_size must be <= %d\n",
4034 + DPAA2_ETH_MAX_BURST_SIZE);
4035 + return -EINVAL;
4036 + }
4037 +
4038 + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg);
4039 + if (err) {
4040 + dev_err(dev, "dpni_set_tx_shaping() failed\n");
4041 + return -EPERM;
4042 + }
4043 + /* If successful, save the current configuration for future inquiries */
4044 + priv->shaping_cfg = scfg;
4045 +
4046 + return count;
4047 +}
4048 +
4049 +static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev,
4050 + struct device_attribute *attr,
4051 + char *buf)
4052 +{
4053 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4054 +
4055 + return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask);
4056 +}
4057 +
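+/* The mask is parsed with cpulist_parse(), e.g. writing "0-3" (an
+ * illustrative value) moves Tx confirmation processing to CPUs 0-3.
+ * CPUs without an affine DPIO are removed from the requested mask.
+ */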
4058 +static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev,
4059 + struct device_attribute *attr,
4060 + const char *buf,
4061 + size_t count)
4062 +{
4063 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4064 + struct dpaa2_eth_fq *fq;
4065 + bool running = netif_running(priv->net_dev);
4066 + int i, err;
4067 +
4068 + err = cpulist_parse(buf, &priv->txconf_cpumask);
4069 + if (err)
4070 + return err;
4071 +
4072 + /* Only accept CPUs that have an affine DPIO */
4073 + if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) {
4074 + netdev_info(priv->net_dev,
4075 + "cpumask must be a subset of 0x%lx\n",
4076 + *cpumask_bits(&priv->dpio_cpumask));
4077 + cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask,
4078 + &priv->txconf_cpumask);
4079 + }
4080 +
4081 + /* Rewiring the TxConf FQs requires interface shutdown.
4082 + */
4083 + if (running) {
4084 + err = dpaa2_eth_stop(priv->net_dev);
4085 + if (err)
4086 + return -ENODEV;
4087 + }
4088 +
4089 + /* Set the new TxConf FQ affinities */
4090 + set_fq_affinity(priv);
4091 +
4092 + /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
4093 + * link up notification is received. Give the polling thread enough time
4094 + * to detect the link state change, or else we'll end up with the
4095 + * transmission side forever shut down.
4096 + */
4097 + if (priv->do_link_poll)
4098 + msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
4099 +
4100 + for (i = 0; i < priv->num_fqs; i++) {
4101 + fq = &priv->fq[i];
4102 + if (fq->type != DPAA2_TX_CONF_FQ)
4103 + continue;
4104 + setup_tx_flow(priv, fq);
4105 + }
4106 +
4107 + if (running) {
4108 + err = dpaa2_eth_open(priv->net_dev);
4109 + if (err)
4110 + return -ENODEV;
4111 + }
4112 +
4113 + return count;
4114 +}
4115 +
4116 +static struct device_attribute dpaa2_eth_attrs[] = {
4117 + __ATTR(txconf_cpumask,
4118 + 0600,
4119 + dpaa2_eth_show_txconf_cpumask,
4120 + dpaa2_eth_write_txconf_cpumask),
4121 +
4122 + __ATTR(tx_shaping,
4123 + 0600,
4124 + dpaa2_eth_show_tx_shaping,
4125 + dpaa2_eth_write_tx_shaping),
4126 +};
4127 +
4128 +static void dpaa2_eth_sysfs_init(struct device *dev)
4129 +{
4130 + int i, err;
4131 +
4132 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
4133 + err = device_create_file(dev, &dpaa2_eth_attrs[i]);
4134 + if (err) {
4135 + dev_err(dev, "ERROR creating sysfs file\n");
4136 + goto undo;
4137 + }
4138 + }
4139 + return;
4140 +
4141 +undo:
4142 + while (i > 0)
4143 + device_remove_file(dev, &dpaa2_eth_attrs[--i]);
4144 +}
4145 +
4146 +static void dpaa2_eth_sysfs_remove(struct device *dev)
4147 +{
4148 + int i;
4149 +
4150 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
4151 + device_remove_file(dev, &dpaa2_eth_attrs[i]);
4152 +}
4153 +
4154 +static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4155 +{
4156 + struct device *dev;
4157 + struct net_device *net_dev = NULL;
4158 + struct dpaa2_eth_priv *priv = NULL;
4159 + int err = 0;
4160 +
4161 + dev = &dpni_dev->dev;
4162 +
4163 + /* Net device */
4164 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
4165 + if (!net_dev) {
4166 + dev_err(dev, "alloc_etherdev_mq() failed\n");
4167 + return -ENOMEM;
4168 + }
4169 +
4170 + SET_NETDEV_DEV(net_dev, dev);
4171 + dev_set_drvdata(dev, net_dev);
4172 +
4173 + priv = netdev_priv(net_dev);
4174 + priv->net_dev = net_dev;
4175 +
4176 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
4177 +
4178 + /* Obtain a MC portal */
4179 + err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4180 + &priv->mc_io);
4181 + if (err) {
4182 + dev_err(dev, "MC portal allocation failed\n");
4183 + goto err_portal_alloc;
4184 + }
4185 +
4186 + /* MC objects initialization and configuration */
4187 + err = setup_dpni(dpni_dev);
4188 + if (err)
4189 + goto err_dpni_setup;
4190 +
4191 + err = setup_dpio(priv);
4192 + if (err) {
4193 + dev_info(dev, "Defer probing as no DPIO available\n");
4194 + err = -EPROBE_DEFER;
4195 + goto err_dpio_setup;
4196 + }
4197 +
4198 + setup_fqs(priv);
4199 +
4200 + err = setup_dpbp(priv);
4201 + if (err)
4202 + goto err_dpbp_setup;
4203 +
4204 + err = bind_dpni(priv);
4205 + if (err)
4206 + goto err_bind;
4207 +
4208 + /* Add a NAPI context for each channel */
4209 + add_ch_napi(priv);
4210 + enable_ch_napi(priv);
4211 +
4212 + /* Percpu statistics */
4213 + priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4214 + if (!priv->percpu_stats) {
4215 + dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4216 + err = -ENOMEM;
4217 + goto err_alloc_percpu_stats;
4218 + }
4219 + priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4220 + if (!priv->percpu_extras) {
4221 + dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4222 + err = -ENOMEM;
4223 + goto err_alloc_percpu_extras;
4224 + }
4225 +
4226 + snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id);
4227 + if (!dev_valid_name(net_dev->name)) {
4228 + dev_warn(&net_dev->dev,
4229 + "netdevice name \"%s\" cannot be used, reverting to default..\n",
4230 + net_dev->name);
4231 + dev_alloc_name(net_dev, "eth%d");
4232 + dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
4233 + }
4234 +
4235 + err = netdev_init(net_dev);
4236 + if (err)
4237 + goto err_netdev_init;
4238 +
4239 + /* Configure checksum offload based on current interface flags */
4240 + err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4241 + if (err)
4242 + goto err_csum;
4243 +
4244 + err = set_tx_csum(priv, !!(net_dev->features &
4245 + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4246 + if (err)
4247 + goto err_csum;
4248 +
4249 + err = alloc_rings(priv);
4250 + if (err)
4251 + goto err_alloc_rings;
4252 +
4253 + net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4254 +
4255 + err = setup_irqs(dpni_dev);
4256 + if (err) {
4257 + netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
4258 + priv->poll_thread = kthread_run(poll_link_state, priv,
4259 + "%s_poll_link", net_dev->name);
4260 + if (IS_ERR(priv->poll_thread)) {
4261 + netdev_err(net_dev, "Error starting polling thread\n");
4262 + goto err_poll_thread;
4263 + }
4264 + priv->do_link_poll = true;
4265 + }
4266 +
4267 + dpaa2_eth_sysfs_init(&net_dev->dev);
4268 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4269 + dpaa2_dbg_add(priv);
4270 +#endif
4271 +
4272 + dev_info(dev, "Probed interface %s\n", net_dev->name);
4273 + return 0;
4274 +
4275 +err_poll_thread:
4276 + free_rings(priv);
4277 +err_alloc_rings:
4278 +err_csum:
4279 + unregister_netdev(net_dev);
4280 +err_netdev_init:
4281 + free_percpu(priv->percpu_extras);
4282 +err_alloc_percpu_extras:
4283 + free_percpu(priv->percpu_stats);
4284 +err_alloc_percpu_stats:
4285 + disable_ch_napi(priv);
4286 + del_ch_napi(priv);
4287 +err_bind:
4288 + free_dpbp(priv);
4289 +err_dpbp_setup:
4290 + free_dpio(priv);
4291 +err_dpio_setup:
4292 + free_dpni(priv);
4293 +err_dpni_setup:
4294 + fsl_mc_portal_free(priv->mc_io);
4295 +err_portal_alloc:
4296 + dev_set_drvdata(dev, NULL);
4297 + free_netdev(net_dev);
4298 +
4299 + return err;
4300 +}
4301 +
4302 +static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4303 +{
4304 + struct device *dev;
4305 + struct net_device *net_dev;
4306 + struct dpaa2_eth_priv *priv;
4307 +
4308 + dev = &ls_dev->dev;
4309 + net_dev = dev_get_drvdata(dev);
4310 + priv = netdev_priv(net_dev);
4311 +
4312 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4313 + dpaa2_dbg_remove(priv);
4314 +#endif
4315 + dpaa2_eth_sysfs_remove(&net_dev->dev);
4316 +
4317 + unregister_netdev(net_dev);
4318 + dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
4319 +
4320 + if (priv->do_link_poll)
4321 + kthread_stop(priv->poll_thread);
4322 + else
4323 + fsl_mc_free_irqs(ls_dev);
4324 +
4325 + free_rings(priv);
4326 + free_percpu(priv->percpu_stats);
4327 + free_percpu(priv->percpu_extras);
4328 +
4329 + disable_ch_napi(priv);
4330 + del_ch_napi(priv);
4331 + free_dpbp(priv);
4332 + free_dpio(priv);
4333 + free_dpni(priv);
4334 +
4335 + fsl_mc_portal_free(priv->mc_io);
4336 +
4337 + dev_set_drvdata(dev, NULL);
4338 + free_netdev(net_dev);
4339 +
4340 + return 0;
4341 +}
4342 +
4343 +static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4344 + {
4345 + .vendor = FSL_MC_VENDOR_FREESCALE,
4346 + .obj_type = "dpni",
4347 + },
4348 + { .vendor = 0x0 }
4349 +};
4350 +MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4351 +
4352 +static struct fsl_mc_driver dpaa2_eth_driver = {
4353 + .driver = {
4354 + .name = KBUILD_MODNAME,
4355 + .owner = THIS_MODULE,
4356 + },
4357 + .probe = dpaa2_eth_probe,
4358 + .remove = dpaa2_eth_remove,
4359 + .match_id_table = dpaa2_eth_match_id_table
4360 +};
4361 +
4362 +static int __init dpaa2_eth_driver_init(void)
4363 +{
4364 + int err;
4365 +
4366 + dpaa2_eth_dbg_init();
4367 + err = fsl_mc_driver_register(&dpaa2_eth_driver);
4368 + if (err) {
4369 + dpaa2_eth_dbg_exit();
4370 + return err;
4371 + }
4372 +
4373 + return 0;
4374 +}
4375 +
4376 +static void __exit dpaa2_eth_driver_exit(void)
4377 +{
4378 + dpaa2_eth_dbg_exit();
4379 + fsl_mc_driver_unregister(&dpaa2_eth_driver);
4380 +}
4381 +
4382 +module_init(dpaa2_eth_driver_init);
4383 +module_exit(dpaa2_eth_driver_exit);
4384 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
4385 new file mode 100644
4386 index 00000000..86cb12e9
4387 --- /dev/null
4388 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
4389 @@ -0,0 +1,460 @@
4390 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
4391 + *
4392 + * Redistribution and use in source and binary forms, with or without
4393 + * modification, are permitted provided that the following conditions are met:
4394 + * * Redistributions of source code must retain the above copyright
4395 + * notice, this list of conditions and the following disclaimer.
4396 + * * Redistributions in binary form must reproduce the above copyright
4397 + * notice, this list of conditions and the following disclaimer in the
4398 + * documentation and/or other materials provided with the distribution.
4399 + * * Neither the name of Freescale Semiconductor nor the
4400 + * names of its contributors may be used to endorse or promote products
4401 + * derived from this software without specific prior written permission.
4402 + *
4403 + *
4404 + * ALTERNATIVELY, this software may be distributed under the terms of the
4405 + * GNU General Public License ("GPL") as published by the Free Software
4406 + * Foundation, either version 2 of that License or (at your option) any
4407 + * later version.
4408 + *
4409 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
4410 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
4411 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
4412 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
4413 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
4414 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
4415 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4416 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4417 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4418 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4419 + */
4420 +
4421 +#ifndef __DPAA2_ETH_H
4422 +#define __DPAA2_ETH_H
4423 +
4424 +#include <linux/atomic.h>
4425 +#include <linux/netdevice.h>
4426 +#include <linux/if_vlan.h>
4427 +#include "../../fsl-mc/include/dpaa2-io.h"
4428 +#include "dpni.h"
4429 +#include "net.h"
4430 +
4431 +#include "dpaa2-eth-debugfs.h"
4432 +
4433 +#define DPAA2_ETH_STORE_SIZE 16
4434 +
4435 +/* We set a max threshold for how many Tx confirmations we should process
4436 + * on a NAPI poll call, since they take less processing time than Rx frames.
4437 + */
4438 +#define TX_CONF_PER_NAPI_POLL 256
4439 +
4440 +/* Maximum number of scatter-gather entries in an ingress frame,
4441 + * considering the maximum receive frame size is 64K
4442 + */
4443 +#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
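+/* (i.e. 32 entries with the current 2048 byte Rx buffer size) */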
4444 +
4445 +/* Maximum acceptable MTU value. It is in direct relation with the hardware
4446 + * enforced Max Frame Length (currently 10k).
4447 + */
4448 +#define DPAA2_ETH_MFL (10 * 1024)
4449 +#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
4450 +/* Convert L3 MTU to L2 MFL */
4451 +#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
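+/* e.g. the default 1500 byte MTU corresponds to a 1518 byte frame length */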
4452 +
4453 +/* Maximum burst size value for Tx shaping */
4454 +#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
4455 +
4456 +/* Maximum number of buffers that can be acquired/released through a single
4457 + * QBMan command
4458 + */
4459 +#define DPAA2_ETH_BUFS_PER_CMD 7
4460 +
4461 +/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
4462 + * frames in the Rx queues (length of the current frame is not
4463 + * taken into account when making the taildrop decision)
4464 + */
4465 +#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
4466 +
4467 +/* Buffer quota per queue. Must be large enough such that for minimum sized
4468 + * frames taildrop kicks in before the bpool gets depleted, so we compute
4469 + * how many 64B frames fit inside the taildrop threshold and add a margin
4470 + * to accommodate the buffer refill delay.
4471 + */
4472 +#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
4473 +#define DPAA2_ETH_NUM_BUFS_TD (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
4474 +#define DPAA2_ETH_REFILL_THRESH_TD \
4475 + (DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD)
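+/* With the 64KB taildrop threshold above, this works out to 1024 frames
+ * per queue, i.e. 1280 buffers and a refill threshold of 1273.
+ */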
4476 +
4477 +/* Buffer quota per queue to use when flow control is active. */
4478 +#define DPAA2_ETH_NUM_BUFS_FC 256
4479 +
4480 +/* Hardware requires alignment for ingress/egress buffer addresses
4481 + * and ingress buffer lengths.
4482 + */
4483 +#define DPAA2_ETH_RX_BUF_SIZE 2048
4484 +#define DPAA2_ETH_TX_BUF_ALIGN 64
4485 +#define DPAA2_ETH_RX_BUF_ALIGN 64
4486 +#define DPAA2_ETH_RX_BUF_ALIGN_V1 256
4487 +#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
4488 + ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
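+/* Used as net_dev->needed_headroom, so the stack reserves enough room
+ * in front of egress frames (see netdev_init())
+ */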
4489 +
4490 +/* Size needed to build an skb around an Rx buffer: the hardware-visible
 + * buffer size plus room for the skb shared info struct */
4491 +#define DPAA2_ETH_SKB_SIZE \
4492 + (DPAA2_ETH_RX_BUF_SIZE + \
4493 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
4494 +
4495 +/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
4496 + * buffers large enough to allow building an skb around them and also account
4497 + * for alignment restrictions.
4498 + */
4499 +#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \
4500 + (DPAA2_ETH_SKB_SIZE + \
4501 + (p_priv)->rx_buf_align)
4502 +
4503 +/* PTP nominal frequency 1GHz */
4504 +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
4505 +
4506 +/* Leave enough extra space in the headroom to make sure the skb is
4507 + * not realloc'd in forwarding scenarios.
4508 + */
4509 +#define DPAA2_ETH_RX_HEAD_ROOM 192
4510 +
4511 +/* We are accommodating a skb backpointer and some S/G info
4512 + * in the frame's software annotation. The hardware
4513 + * options are either 0 or 64, so we choose the latter.
4514 + */
4515 +#define DPAA2_ETH_SWA_SIZE 64
4516 +
4517 +/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
4518 +struct dpaa2_eth_swa {
4519 + struct sk_buff *skb;
4520 + struct scatterlist *scl;
4521 + int num_sg;
4522 + int num_dma_bufs;
4523 +};
4524 +
4525 +/* Annotation valid bits in FD FRC */
4526 +#define DPAA2_FD_FRC_FASV 0x8000
4527 +#define DPAA2_FD_FRC_FAEADV 0x4000
4528 +#define DPAA2_FD_FRC_FAPRV 0x2000
4529 +#define DPAA2_FD_FRC_FAIADV 0x1000
4530 +#define DPAA2_FD_FRC_FASWOV 0x0800
4531 +#define DPAA2_FD_FRC_FAICFDV 0x0400
4532 +
4533 +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
4534 +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
4535 + FD_CTRL_SBE | \
4536 + FD_CTRL_FSE | \
4537 + FD_CTRL_FAERR)
4538 +
4539 +/* Annotation bits in FD CTRL */
4540 +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
4541 +
4542 +/* Size of hardware annotation area based on the current buffer layout
4543 + * configuration
4544 + */
4545 +#define DPAA2_ETH_RX_HWA_SIZE 64
4546 +#define DPAA2_ETH_TX_HWA_SIZE 128
4547 +
4548 +/* Frame annotation status */
4549 +struct dpaa2_fas {
4550 + u8 reserved;
4551 + u8 ppid;
4552 + __le16 ifpid;
4553 + __le32 status;
4554 +} __packed;
4555 +
4556 +/* Frame annotation status word is located in the first 8 bytes
4557 + * of the buffer's hardware annotation area
4558 + */
4559 +#define DPAA2_FAS_OFFSET 0
4560 +#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
4561 +
4562 +/* Timestamp is located in the next 8 bytes of the buffer's
4563 + * hardware annotation area
4564 + */
4565 +#define DPAA2_TS_OFFSET 0x8
4566 +
4567 +/* Frame annotation egress action descriptor */
4568 +#define DPAA2_FAEAD_OFFSET 0x58
4569 +
4570 +struct dpaa2_faead {
4571 + __le32 conf_fqid;
4572 + __le32 ctrl;
4573 +};
4574 +
4575 +#define DPAA2_FAEAD_A2V 0x20000000
4576 +#define DPAA2_FAEAD_UPDV 0x00001000
4577 +#define DPAA2_FAEAD_UPD 0x00000010
4578 +
4579 +/* accessors for the hardware annotation fields that we use */
4580 +#define dpaa2_eth_get_hwa(buf_addr) \
4581 + ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
4582 +
4583 +#define dpaa2_eth_get_fas(buf_addr) \
4584 + (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
4585 +
4586 +#define dpaa2_eth_get_ts(buf_addr) \
4587 + (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET)
4588 +
4589 +#define dpaa2_eth_get_faead(buf_addr) \
4590 + (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET)
4591 +
4592 +/* Error and status bits in the frame annotation status word */
4593 +/* Debug frame, otherwise supposed to be discarded */
4594 +#define DPAA2_FAS_DISC 0x80000000
4595 +/* MACSEC frame */
4596 +#define DPAA2_FAS_MS 0x40000000
4597 +#define DPAA2_FAS_PTP 0x08000000
4598 +/* Ethernet multicast frame */
4599 +#define DPAA2_FAS_MC 0x04000000
4600 +/* Ethernet broadcast frame */
4601 +#define DPAA2_FAS_BC 0x02000000
4602 +#define DPAA2_FAS_KSE 0x00040000
4603 +#define DPAA2_FAS_EOFHE 0x00020000
4604 +#define DPAA2_FAS_MNLE 0x00010000
4605 +#define DPAA2_FAS_TIDE 0x00008000
4606 +#define DPAA2_FAS_PIEE 0x00004000
4607 +/* Frame length error */
4608 +#define DPAA2_FAS_FLE 0x00002000
4609 +/* Frame physical error */
4610 +#define DPAA2_FAS_FPE 0x00001000
4611 +#define DPAA2_FAS_PTE 0x00000080
4612 +#define DPAA2_FAS_ISP 0x00000040
4613 +#define DPAA2_FAS_PHE 0x00000020
4614 +#define DPAA2_FAS_BLE 0x00000010
4615 +/* L3 csum validation performed */
4616 +#define DPAA2_FAS_L3CV 0x00000008
4617 +/* L3 csum error */
4618 +#define DPAA2_FAS_L3CE 0x00000004
4619 +/* L4 csum validation performed */
4620 +#define DPAA2_FAS_L4CV 0x00000002
4621 +/* L4 csum error */
4622 +#define DPAA2_FAS_L4CE 0x00000001
4623 +/* Possible errors on the ingress path */
4624 +#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \
4625 + (DPAA2_FAS_EOFHE) | \
4626 + (DPAA2_FAS_MNLE) | \
4627 + (DPAA2_FAS_TIDE) | \
4628 + (DPAA2_FAS_PIEE) | \
4629 + (DPAA2_FAS_FLE) | \
4630 + (DPAA2_FAS_FPE) | \
4631 + (DPAA2_FAS_PTE) | \
4632 + (DPAA2_FAS_ISP) | \
4633 + (DPAA2_FAS_PHE) | \
4634 + (DPAA2_FAS_BLE) | \
4635 + (DPAA2_FAS_L3CE) | \
4636 + (DPAA2_FAS_L4CE))
4637 +/* Tx errors */
4638 +#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \
4639 + (DPAA2_FAS_EOFHE) | \
4640 + (DPAA2_FAS_MNLE) | \
4641 + (DPAA2_FAS_TIDE))
4642 +
4643 +/* Time in milliseconds between link state updates */
4644 +#define DPAA2_ETH_LINK_STATE_REFRESH 1000
4645 +
4646 +/* Number of times to retry a frame enqueue before giving up.
4647 + * Value determined empirically, in order to minimize the number
4648 + * of frames dropped on Tx
4649 + */
4650 +#define DPAA2_ETH_ENQUEUE_RETRIES 10
4651 +
4652 +/* Tx congestion entry & exit thresholds, in number of bytes.
4653 + * We allow a maximum of 512KB worth of frames pending processing on the Tx
4654 + * queues of an interface
4655 + */
4656 +#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
4657 +#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10)
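+/* i.e. congestion exit is signalled once pending data drops below ~460KB */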
4658 +
4659 +/* Driver statistics, other than those in struct rtnl_link_stats64.
4660 + * These are usually collected per-CPU and aggregated by ethtool.
4661 + */
4662 +struct dpaa2_eth_drv_stats {
4663 + __u64 tx_conf_frames;
4664 + __u64 tx_conf_bytes;
4665 + __u64 tx_sg_frames;
4666 + __u64 tx_sg_bytes;
4667 + __u64 rx_sg_frames;
4668 + __u64 rx_sg_bytes;
4669 + /* Enqueues retried due to portal busy */
4670 + __u64 tx_portal_busy;
4671 +};
4672 +
4673 +/* Per-FQ statistics */
4674 +struct dpaa2_eth_fq_stats {
4675 + /* Number of frames received on this queue */
4676 + __u64 frames;
4677 + /* Number of times this queue entered congestion */
4678 + __u64 congestion_entry;
4679 +};
4680 +
4681 +/* Per-channel statistics */
4682 +struct dpaa2_eth_ch_stats {
4683 + /* Volatile dequeues retried due to portal busy */
4684 + __u64 dequeue_portal_busy;
4685 + /* Number of CDANs; useful to estimate avg NAPI len */
4686 + __u64 cdan;
4687 + /* Number of frames received on queues from this channel */
4688 + __u64 frames;
4689 + /* Pull errors */
4690 + __u64 pull_err;
4691 +};
4692 +
4693 +/* Maximum number of queues associated with a DPNI */
4694 +#define DPAA2_ETH_MAX_RX_QUEUES 16
4695 +#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
4696 +#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
4697 +#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
4698 + DPAA2_ETH_MAX_TX_QUEUES + \
4699 + DPAA2_ETH_MAX_RX_ERR_QUEUES)
4700 +
4701 +#define DPAA2_ETH_MAX_DPCONS NR_CPUS
4702 +
4703 +enum dpaa2_eth_fq_type {
4704 + DPAA2_RX_FQ = 0,
4705 + DPAA2_TX_CONF_FQ,
4706 + DPAA2_RX_ERR_FQ
4707 +};
4708 +
4709 +struct dpaa2_eth_priv;
4710 +
4711 +struct dpaa2_eth_fq {
4712 + u32 fqid;
4713 + u32 tx_qdbin;
4714 + u16 flowid;
4715 + int target_cpu;
4716 + struct dpaa2_eth_channel *channel;
4717 + enum dpaa2_eth_fq_type type;
4718 +
4719 + void (*consume)(struct dpaa2_eth_priv *,
4720 + struct dpaa2_eth_channel *,
4721 + const struct dpaa2_fd *,
4722 + struct napi_struct *,
4723 + u16 queue_id);
4724 + struct dpaa2_eth_fq_stats stats;
4725 +};
4726 +
4727 +struct dpaa2_eth_channel {
4728 + struct dpaa2_io_notification_ctx nctx;
4729 + struct fsl_mc_device *dpcon;
4730 + int dpcon_id;
4731 + int ch_id;
4732 + int dpio_id;
4733 + struct napi_struct napi;
4734 + struct dpaa2_io_store *store;
4735 + struct dpaa2_eth_priv *priv;
4736 + int buf_count;
4737 + struct dpaa2_eth_ch_stats stats;
4738 +};
4739 +
4740 +struct dpaa2_eth_cls_rule {
4741 + struct ethtool_rx_flow_spec fs;
4742 + bool in_use;
4743 +};
4744 +
4745 +struct dpaa2_eth_hash_fields {
4746 + u64 rxnfc_field;
4747 + enum net_prot cls_prot;
4748 + int cls_field;
4749 + int offset;
4750 + int size;
4751 +};
4752 +
4753 +/* Driver private data */
4754 +struct dpaa2_eth_priv {
4755 + struct net_device *net_dev;
4756 +
4757 + /* Standard statistics */
4758 + struct rtnl_link_stats64 __percpu *percpu_stats;
4759 + /* Extra stats, in addition to the ones known by the kernel */
4760 + struct dpaa2_eth_drv_stats __percpu *percpu_extras;
4761 + struct iommu_domain *iommu_domain;
4762 +
4763 + bool ts_tx_en; /* Tx timestamping enabled */
4764 + bool ts_rx_en; /* Rx timestamping enabled */
4765 +
4766 + u16 tx_data_offset;
4767 + u16 rx_buf_align;
4768 +
4769 + u16 bpid;
4770 + u16 tx_qdid;
4771 +
4772 + int tx_pause_frames;
4773 + int num_bufs;
4774 + int refill_thresh;
4775 +
4776 + /* Tx congestion notifications are written here */
4777 + void *cscn_mem;
4778 + void *cscn_unaligned;
4779 + dma_addr_t cscn_dma;
4780 +
4781 + u8 num_fqs;
4782 + /* Tx queues are at the beginning of the array */
4783 + struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
4784 +
4785 + u8 num_channels;
4786 + struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
4787 +
4788 + int dpni_id;
4789 + struct dpni_attr dpni_attrs;
4790 + struct fsl_mc_device *dpbp_dev;
4791 +
4792 + struct fsl_mc_io *mc_io;
4793 + /* SysFS-controlled affinity mask for TxConf FQs */
4794 + struct cpumask txconf_cpumask;
4795 + /* Cores which have an affine DPIO/DPCON.
4796 + * This is the cpu set on which Rx frames are processed;
4797 + * Tx confirmation frames are processed on a subset of this,
4798 + * depending on user settings.
4799 + */
4800 + struct cpumask dpio_cpumask;
4801 +
4802 + u16 mc_token;
4803 +
4804 + struct dpni_link_state link_state;
4805 + bool do_link_poll;
4806 + struct task_struct *poll_thread;
4807 +
4808 + struct dpaa2_eth_hash_fields *hash_fields;
4809 + u8 num_hash_fields;
4810 + /* enabled ethtool hashing bits */
4811 + u64 rx_flow_hash;
4812 +
4813 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4814 + struct dpaa2_debugfs dbg;
4815 +#endif
4816 +
4817 + /* array of classification rules */
4818 + struct dpaa2_eth_cls_rule *cls_rule;
4819 +
4820 + struct dpni_tx_shaping_cfg shaping_cfg;
4821 +};
4822 +
4823 +#define dpaa2_eth_hash_enabled(priv) \
4824 + ((priv)->dpni_attrs.num_queues > 1)
4825 +
4826 +#define dpaa2_eth_fs_enabled(priv) \
4827 + (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
4828 +
4829 +#define dpaa2_eth_fs_mask_enabled(priv) \
4830 + ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
4831 +
4832 +#define dpaa2_eth_fs_count(priv) \
4833 + ((priv)->dpni_attrs.fs_entries)
4834 +
4835 +/* size of DMA memory used to pass configuration to classifier, in bytes */
4836 +#define DPAA2_CLASSIFIER_DMA_SIZE 256
4837 +
4838 +extern const struct ethtool_ops dpaa2_ethtool_ops;
4839 +extern const char dpaa2_eth_drv_version[];
4840 +
4841 +static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
4842 +{
4843 + return priv->dpni_attrs.num_queues;
4844 +}
4845 +
4846 +void check_cls_support(struct dpaa2_eth_priv *priv);
4847 +
4848 +int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, bool enable);
4849 +#endif	/* __DPAA2_ETH_H */
4850 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
4851 new file mode 100644
4852 index 00000000..9859814e
4853 --- /dev/null
4854 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
4855 @@ -0,0 +1,856 @@
4856 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
4857 + *
4858 + * Redistribution and use in source and binary forms, with or without
4859 + * modification, are permitted provided that the following conditions are met:
4860 + * * Redistributions of source code must retain the above copyright
4861 + * notice, this list of conditions and the following disclaimer.
4862 + * * Redistributions in binary form must reproduce the above copyright
4863 + * notice, this list of conditions and the following disclaimer in the
4864 + * documentation and/or other materials provided with the distribution.
4865 + * * Neither the name of Freescale Semiconductor nor the
4866 + * names of its contributors may be used to endorse or promote products
4867 + * derived from this software without specific prior written permission.
4868 + *
4869 + *
4870 + * ALTERNATIVELY, this software may be distributed under the terms of the
4871 + * GNU General Public License ("GPL") as published by the Free Software
4872 + * Foundation, either version 2 of that License or (at your option) any
4873 + * later version.
4874 + *
4875 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
4876 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
4877 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
4878 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
4879 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
4880 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
4881 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4882 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4883 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4884 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4885 + */
4886 +
4887 +#include "dpni.h" /* DPNI_LINK_OPT_* */
4888 +#include "dpaa2-eth.h"
4889 +
4890 +/* To be kept in sync with dpni_statistics */
4891 +static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
4892 + "rx frames",
4893 + "rx bytes",
4894 + "rx mcast frames",
4895 + "rx mcast bytes",
4896 + "rx bcast frames",
4897 + "rx bcast bytes",
4898 + "tx frames",
4899 + "tx bytes",
4900 + "tx mcast frames",
4901 + "tx mcast bytes",
4902 + "tx bcast frames",
4903 + "tx bcast bytes",
4904 + "rx filtered frames",
4905 + "rx discarded frames",
4906 + "rx nobuffer discards",
4907 + "tx discarded frames",
4908 + "tx confirmed frames",
4909 +};
4910 +
4911 +#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
4912 +
4913 +/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */
4914 +static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
4915 + /* per-cpu stats */
4916 +
4917 + "tx conf frames",
4918 + "tx conf bytes",
4919 + "tx sg frames",
4920 + "tx sg bytes",
4921 + "rx sg frames",
4922 + "rx sg bytes",
4923 + /* how many times we had to retry the enqueue command */
4924 + "enqueue portal busy",
4925 +
4926 + /* Channel stats */
4927 + /* How many times we had to retry the volatile dequeue command */
4928 + "dequeue portal busy",
4929 + "channel pull errors",
4930 + /* Number of notifications received */
4931 + "cdan",
4932 + "tx congestion state",
4933 +#ifdef CONFIG_FSL_QBMAN_DEBUG
4934 + /* FQ stats */
4935 + "rx pending frames",
4936 + "rx pending bytes",
4937 + "tx conf pending frames",
4938 + "tx conf pending bytes",
4939 + "buffer count"
4940 +#endif
4941 +};
4942 +
4943 +#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
4944 +
4945 +static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
4946 + struct ethtool_drvinfo *drvinfo)
4947 +{
4948 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
4949 + strlcpy(drvinfo->version, dpaa2_eth_drv_version,
4950 + sizeof(drvinfo->version));
4951 + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
4952 + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
4953 + sizeof(drvinfo->bus_info));
4954 +}
4955 +
4956 +static int dpaa2_eth_get_settings(struct net_device *net_dev,
4957 + struct ethtool_cmd *cmd)
4958 +{
4959 + struct dpni_link_state state = {0};
4960 + int err = 0;
4961 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4962 +
4963 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4964 + if (err) {
4965 + netdev_err(net_dev, "ERROR %d getting link state", err);
4966 + goto out;
4967 + }
4968 +
4969 + /* At the moment, we have no way of interrogating the DPMAC
4970 + * from the DPNI side - and for that matter there may exist
4971 + * no DPMAC at all. So for now we just don't report anything
4972 + * beyond the DPNI attributes.
4973 + */
4974 + if (state.options & DPNI_LINK_OPT_AUTONEG)
4975 + cmd->autoneg = AUTONEG_ENABLE;
4976 + if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
4977 + cmd->duplex = DUPLEX_FULL;
4978 + ethtool_cmd_speed_set(cmd, state.rate);
4979 +
4980 +out:
4981 + return err;
4982 +}
4983 +
4984 +static int dpaa2_eth_set_settings(struct net_device *net_dev,
4985 + struct ethtool_cmd *cmd)
4986 +{
4987 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4988 + struct dpni_link_state state = {0};
4989 + struct dpni_link_cfg cfg = {0};
4990 + int err = 0;
4991 +
4992 + netdev_dbg(net_dev, "Setting link parameters...");
4993 +
4994 + /* Need to interrogate on link state to get flow control params */
4995 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4996 + if (err) {
4997 + netdev_err(net_dev, "ERROR %d getting link state", err);
4998 + goto out;
4999 + }
5000 +
5001 + cfg.options = state.options;
5002 + cfg.rate = ethtool_cmd_speed(cmd);
5003 + if (cmd->autoneg == AUTONEG_ENABLE)
5004 + cfg.options |= DPNI_LINK_OPT_AUTONEG;
5005 + else
5006 + cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
5007 + if (cmd->duplex == DUPLEX_HALF)
5008 + cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
5009 + else
5010 + cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
5011 +
5012 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
5013 + if (err)
5014 + /* ethtool will be loud enough if we return an error; no point
5015 + * in putting our own error message on the console by default
5016 + */
5017 + netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
5018 +
5019 +out:
5020 + return err;
5021 +}
5022 +
5023 +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
5024 + struct ethtool_pauseparam *pause)
5025 +{
5026 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5027 + struct dpni_link_state state = {0};
5028 + int err;
5029 +
5030 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5031 + if (err)
5032 + netdev_dbg(net_dev, "ERROR %d getting link state", err);
5033 +
5034 + /* for now, pause frames autonegotiation is not separate */
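+	/* The DPNI link options encode pause frame support as follows (this
+	 * is what the assignments below implement): PAUSE set and ASYM_PAUSE
+	 * clear means pause is enabled in both directions; PAUSE together
+	 * with ASYM_PAUSE means Rx pause only; ASYM_PAUSE alone means Tx
+	 * pause only; neither flag means pause frames are disabled.
+	 */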
5035 + pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
5036 + pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
5037 + pause->tx_pause = pause->rx_pause ^
5038 + !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
5039 +}
5040 +
5041 +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
5042 + struct ethtool_pauseparam *pause)
5043 +{
5044 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5045 + struct dpni_link_state state = {0};
5046 + struct dpni_link_cfg cfg = {0};
5047 + u32 current_tx_pause;
5048 + int err = 0;
5049 +
5050 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5051 + if (err) {
5052 + netdev_dbg(net_dev, "ERROR %d getting link state", err);
5053 + goto out;
5054 + }
5055 +
5056 + cfg.rate = state.rate;
5057 + cfg.options = state.options;
5058 + current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
5059 + !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
5060 +
5061 + if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
5062 + netdev_warn(net_dev,
5063 + "WARN: Can't change pause frames autoneg separately\n");
5064 +
5065 + if (pause->rx_pause)
5066 + cfg.options |= DPNI_LINK_OPT_PAUSE;
5067 + else
5068 + cfg.options &= ~DPNI_LINK_OPT_PAUSE;
5069 +
5070 + if (pause->rx_pause ^ pause->tx_pause)
5071 + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
5072 + else
5073 + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
5074 +
5075 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
5076 + if (err) {
5077 + /* ethtool will be loud enough if we return an error; no point
5078 + * in putting our own error message on the console by default
5079 + */
5080 + netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
5081 + goto out;
5082 + }
5083 +
5084 + /* Enable / disable taildrops if Tx pause frames have changed */
5085 + if (current_tx_pause == pause->tx_pause)
5086 + goto out;
5087 +
5088 + err = setup_fqs_taildrop(priv, !pause->tx_pause);
5089 + if (err)
5090 + netdev_dbg(net_dev, "ERROR %d configuring taildrop", err);
5091 +
5092 + priv->tx_pause_frames = pause->tx_pause;
5093 +out:
5094 + return err;
5095 +}
5096 +
5097 +static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
5098 + u8 *data)
5099 +{
5100 + u8 *p = data;
5101 + int i;
5102 +
5103 + switch (stringset) {
5104 + case ETH_SS_STATS:
5105 + for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
5106 + strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
5107 + p += ETH_GSTRING_LEN;
5108 + }
5109 + for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
5110 + strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
5111 + p += ETH_GSTRING_LEN;
5112 + }
5113 + break;
5114 + }
5115 +}
5116 +
5117 +static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
5118 +{
5119 + switch (sset) {
5120 + case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
5121 + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
5122 + default:
5123 + return -EOPNOTSUPP;
5124 + }
5125 +}
5126 +
5127 +/** Fill in hardware counters, as returned by MC.
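+ * Values are written into data[] in the same order as the strings reported
+ * by dpaa2_eth_get_strings(): first the DPNI counters described by
+ * dpaa2_ethtool_stats[], then the driver-maintained extras described by
+ * dpaa2_ethtool_extras[] (per-cpu totals, channel stats and, when
+ * CONFIG_FSL_QBMAN_DEBUG is set, the FQ and buffer pool counts).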
5128 + */
5129 +static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
5130 + struct ethtool_stats *stats,
5131 + u64 *data)
5132 +{
5133 + int i = 0; /* Current index in the data array */
5134 + int j = 0, k, err;
5135 + union dpni_statistics dpni_stats;
5136 +
5137 +#ifdef CONFIG_FSL_QBMAN_DEBUG
5138 + u32 fcnt, bcnt;
5139 + u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
5140 + u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
5141 + u32 buf_cnt;
5142 +#endif
5143 + u64 cdan = 0;
5144 + u64 portal_busy = 0, pull_err = 0;
5145 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5146 + struct dpaa2_eth_drv_stats *extras;
5147 + struct dpaa2_eth_ch_stats *ch_stats;
5148 +
5149 + memset(data, 0,
5150 + sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
5151 +
5152 +	/* Collect standard counters from DPNI statistics */
5153 + for (j = 0; j <= 2; j++) {
5154 + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
5155 + j, &dpni_stats);
5156 + if (err != 0)
5157 + netdev_warn(net_dev, "Err %d getting DPNI stats page %d",
5158 + err, j);
5159 +
5160 + switch (j) {
5161 + case 0:
5162 + *(data + i++) = dpni_stats.page_0.ingress_all_frames;
5163 + *(data + i++) = dpni_stats.page_0.ingress_all_bytes;
5164 + *(data + i++) = dpni_stats.page_0.ingress_multicast_frames;
5165 + *(data + i++) = dpni_stats.page_0.ingress_multicast_bytes;
5166 + *(data + i++) = dpni_stats.page_0.ingress_broadcast_frames;
5167 + *(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes;
5168 + break;
5169 + case 1:
5170 + *(data + i++) = dpni_stats.page_1.egress_all_frames;
5171 + *(data + i++) = dpni_stats.page_1.egress_all_bytes;
5172 + *(data + i++) = dpni_stats.page_1.egress_multicast_frames;
5173 + *(data + i++) = dpni_stats.page_1.egress_multicast_bytes;
5174 + *(data + i++) = dpni_stats.page_1.egress_broadcast_frames;
5175 + *(data + i++) = dpni_stats.page_1.egress_broadcast_bytes;
5176 + break;
5177 + case 2:
5178 + *(data + i++) = dpni_stats.page_2.ingress_filtered_frames;
5179 + *(data + i++) = dpni_stats.page_2.ingress_discarded_frames;
5180 + *(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards;
5181 + *(data + i++) = dpni_stats.page_2.egress_discarded_frames;
5182 + *(data + i++) = dpni_stats.page_2.egress_confirmed_frames;
5183 + break;
5184 + default:
5185 + break;
5186 + }
5187 + }
5188 +
5189 +	/* Collect per-cpu extra stats */
5190 + for_each_online_cpu(k) {
5191 + extras = per_cpu_ptr(priv->percpu_extras, k);
5192 + for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
5193 + *((__u64 *)data + i + j) += *((__u64 *)extras + j);
5194 + }
5195 +
5196 + i += j;
5197 +
5198 + /* We may be using fewer DPIOs than actual CPUs */
5199 + for (j = 0; j < priv->num_channels; j++) {
5200 + ch_stats = &priv->channel[j]->stats;
5201 + cdan += ch_stats->cdan;
5202 + portal_busy += ch_stats->dequeue_portal_busy;
5203 + pull_err += ch_stats->pull_err;
5204 + }
5205 +
5206 + *(data + i++) = portal_busy;
5207 + *(data + i++) = pull_err;
5208 + *(data + i++) = cdan;
5209 +
5210 + *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
5211 +
5212 +#ifdef CONFIG_FSL_QBMAN_DEBUG
5213 + for (j = 0; j < priv->num_fqs; j++) {
5214 +		/* Read FQ instantaneous counts */
5215 + err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
5216 + &fcnt, &bcnt);
5217 + if (err) {
5218 + netdev_warn(net_dev, "FQ query error %d", err);
5219 + return;
5220 + }
5221 +
5222 + if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
5223 + fcnt_tx_total += fcnt;
5224 + bcnt_tx_total += bcnt;
5225 + } else {
5226 + fcnt_rx_total += fcnt;
5227 + bcnt_rx_total += bcnt;
5228 + }
5229 + }
5230 +
5231 + *(data + i++) = fcnt_rx_total;
5232 + *(data + i++) = bcnt_rx_total;
5233 + *(data + i++) = fcnt_tx_total;
5234 + *(data + i++) = bcnt_tx_total;
5235 +
5236 + err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
5237 + if (err) {
5238 + netdev_warn(net_dev, "Buffer count query error %d\n", err);
5239 + return;
5240 + }
5241 + *(data + i++) = buf_cnt;
5242 +#endif
5243 +}
5244 +
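+/* The Rx classification key is the concatenation, in order, of all fields in
+ * priv->hash_fields. cls_key_off() returns the byte offset of a given
+ * protocol field inside that key (or -1 if the field is not part of it),
+ * while cls_key_size() returns the total key size in bytes.
+ */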
5245 +static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
5246 +{
5247 + int i, off = 0;
5248 +
5249 + for (i = 0; i < priv->num_hash_fields; i++) {
5250 + if (priv->hash_fields[i].cls_prot == prot &&
5251 + priv->hash_fields[i].cls_field == field)
5252 + return off;
5253 + off += priv->hash_fields[i].size;
5254 + }
5255 +
5256 + return -1;
5257 +}
5258 +
5259 +static u8 cls_key_size(struct dpaa2_eth_priv *priv)
5260 +{
5261 + u8 i, size = 0;
5262 +
5263 + for (i = 0; i < priv->num_hash_fields; i++)
5264 + size += priv->hash_fields[i].size;
5265 +
5266 + return size;
5267 +}
5268 +
5269 +void check_cls_support(struct dpaa2_eth_priv *priv)
5270 +{
5271 + u8 key_size = cls_key_size(priv);
5272 + struct device *dev = priv->net_dev->dev.parent;
5273 +
5274 + if (dpaa2_eth_hash_enabled(priv)) {
5275 + if (priv->dpni_attrs.fs_key_size < key_size) {
5276 + dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
5277 + priv->dpni_attrs.fs_key_size,
5278 + key_size);
5279 + goto disable_fs;
5280 + }
5281 + if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
5282 + dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
5283 + DPKG_MAX_NUM_OF_EXTRACTS);
5284 + goto disable_fs;
5285 + }
5286 + }
5287 +
5288 + if (dpaa2_eth_fs_enabled(priv)) {
5289 + if (!dpaa2_eth_hash_enabled(priv)) {
5290 + dev_info(dev, "Insufficient queues. Steering is disabled\n");
5291 + goto disable_fs;
5292 + }
5293 +
5294 + if (!dpaa2_eth_fs_mask_enabled(priv)) {
5295 + dev_info(dev, "Key masks not supported. Steering is disabled\n");
5296 + goto disable_fs;
5297 + }
5298 + }
5299 +
5300 + return;
5301 +
5302 +disable_fs:
5303 + priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
5304 + priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
5305 +}
5306 +
5307 +static int prep_l4_rule(struct dpaa2_eth_priv *priv,
5308 + struct ethtool_tcpip4_spec *l4_value,
5309 + struct ethtool_tcpip4_spec *l4_mask,
5310 + void *key, void *mask, u8 l4_proto)
5311 +{
5312 + int offset;
5313 +
5314 + if (l4_mask->tos) {
5315 + netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
5316 + return -EOPNOTSUPP;
5317 + }
5318 +
5319 + if (l4_mask->ip4src) {
5320 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
5321 + *(u32 *)(key + offset) = l4_value->ip4src;
5322 + *(u32 *)(mask + offset) = l4_mask->ip4src;
5323 + }
5324 +
5325 + if (l4_mask->ip4dst) {
5326 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
5327 + *(u32 *)(key + offset) = l4_value->ip4dst;
5328 + *(u32 *)(mask + offset) = l4_mask->ip4dst;
5329 + }
5330 +
5331 + if (l4_mask->psrc) {
5332 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
5333 + *(u32 *)(key + offset) = l4_value->psrc;
5334 + *(u32 *)(mask + offset) = l4_mask->psrc;
5335 + }
5336 +
5337 + if (l4_mask->pdst) {
5338 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
5339 + *(u32 *)(key + offset) = l4_value->pdst;
5340 + *(u32 *)(mask + offset) = l4_mask->pdst;
5341 + }
5342 +
5343 + /* Only apply the rule for the user-specified L4 protocol
5344 + * and if ethertype matches IPv4
5345 + */
5346 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
5347 + *(u16 *)(key + offset) = htons(ETH_P_IP);
5348 + *(u16 *)(mask + offset) = 0xFFFF;
5349 +
5350 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
5351 + *(u8 *)(key + offset) = l4_proto;
5352 + *(u8 *)(mask + offset) = 0xFF;
5353 +
5354 + /* TODO: check IP version */
5355 +
5356 + return 0;
5357 +}
5358 +
5359 +static int prep_eth_rule(struct dpaa2_eth_priv *priv,
5360 + struct ethhdr *eth_value, struct ethhdr *eth_mask,
5361 + void *key, void *mask)
5362 +{
5363 + int offset;
5364 +
5365 + if (eth_mask->h_proto) {
5366 + netdev_err(priv->net_dev, "Ethertype is not supported!\n");
5367 + return -EOPNOTSUPP;
5368 + }
5369 +
5370 + if (!is_zero_ether_addr(eth_mask->h_source)) {
5371 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
5372 + ether_addr_copy(key + offset, eth_value->h_source);
5373 + ether_addr_copy(mask + offset, eth_mask->h_source);
5374 + }
5375 +
5376 + if (!is_zero_ether_addr(eth_mask->h_dest)) {
5377 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
5378 + ether_addr_copy(key + offset, eth_value->h_dest);
5379 + ether_addr_copy(mask + offset, eth_mask->h_dest);
5380 + }
5381 +
5382 + return 0;
5383 +}
5384 +
5385 +static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
5386 + struct ethtool_usrip4_spec *uip_value,
5387 + struct ethtool_usrip4_spec *uip_mask,
5388 + void *key, void *mask)
5389 +{
5390 + int offset;
5391 +
5392 + if (uip_mask->tos)
5393 + return -EOPNOTSUPP;
5394 +
5395 + if (uip_mask->ip4src) {
5396 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
5397 + *(u32 *)(key + offset) = uip_value->ip4src;
5398 + *(u32 *)(mask + offset) = uip_mask->ip4src;
5399 + }
5400 +
5401 + if (uip_mask->ip4dst) {
5402 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
5403 + *(u32 *)(key + offset) = uip_value->ip4dst;
5404 + *(u32 *)(mask + offset) = uip_mask->ip4dst;
5405 + }
5406 +
5407 + if (uip_mask->proto) {
5408 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
5409 + *(u32 *)(key + offset) = uip_value->proto;
5410 + *(u32 *)(mask + offset) = uip_mask->proto;
5411 + }
5412 + if (uip_mask->l4_4_bytes) {
5413 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
5414 + *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
5415 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
5416 +
5417 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
5418 + *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
5419 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
5420 + }
5421 +
5422 + /* Ethertype must be IP */
5423 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
5424 + *(u16 *)(key + offset) = htons(ETH_P_IP);
5425 + *(u16 *)(mask + offset) = 0xFFFF;
5426 +
5427 + return 0;
5428 +}
5429 +
5430 +static int prep_ext_rule(struct dpaa2_eth_priv *priv,
5431 + struct ethtool_flow_ext *ext_value,
5432 + struct ethtool_flow_ext *ext_mask,
5433 + void *key, void *mask)
5434 +{
5435 + int offset;
5436 +
5437 + if (ext_mask->vlan_etype)
5438 + return -EOPNOTSUPP;
5439 +
5440 + if (ext_mask->vlan_tci) {
5441 + offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
5442 + *(u16 *)(key + offset) = ext_value->vlan_tci;
5443 + *(u16 *)(mask + offset) = ext_mask->vlan_tci;
5444 + }
5445 +
5446 + return 0;
5447 +}
5448 +
5449 +static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
5450 + struct ethtool_flow_ext *ext_value,
5451 + struct ethtool_flow_ext *ext_mask,
5452 + void *key, void *mask)
5453 +{
5454 + int offset;
5455 +
5456 + if (!is_zero_ether_addr(ext_mask->h_dest)) {
5457 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
5458 + ether_addr_copy(key + offset, ext_value->h_dest);
5459 + ether_addr_copy(mask + offset, ext_mask->h_dest);
5460 + }
5461 +
5462 + return 0;
5463 +}
5464 +
5465 +static int prep_cls_rule(struct net_device *net_dev,
5466 + struct ethtool_rx_flow_spec *fs,
5467 + void *key)
5468 +{
5469 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5470 + const u8 key_size = cls_key_size(priv);
5471 + void *msk = key + key_size;
5472 + int err;
5473 +
5474 + memset(key, 0, key_size * 2);
5475 +
5476 + switch (fs->flow_type & 0xff) {
5477 + case TCP_V4_FLOW:
5478 + err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
5479 + &fs->m_u.tcp_ip4_spec, key, msk,
5480 + IPPROTO_TCP);
5481 + break;
5482 + case UDP_V4_FLOW:
5483 + err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
5484 + &fs->m_u.udp_ip4_spec, key, msk,
5485 + IPPROTO_UDP);
5486 + break;
5487 + case SCTP_V4_FLOW:
5488 + err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
5489 + &fs->m_u.sctp_ip4_spec, key, msk,
5490 + IPPROTO_SCTP);
5491 + break;
5492 + case ETHER_FLOW:
5493 + err = prep_eth_rule(priv, &fs->h_u.ether_spec,
5494 + &fs->m_u.ether_spec, key, msk);
5495 + break;
5496 + case IP_USER_FLOW:
5497 + err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
5498 + &fs->m_u.usr_ip4_spec, key, msk);
5499 + break;
5500 + default:
5501 + /* TODO: AH, ESP */
5502 + return -EOPNOTSUPP;
5503 + }
5504 + if (err)
5505 + return err;
5506 +
5507 + if (fs->flow_type & FLOW_EXT) {
5508 + err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
5509 + if (err)
5510 + return err;
5511 + }
5512 +
5513 + if (fs->flow_type & FLOW_MAC_EXT) {
5514 + err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
5515 + if (err)
5516 + return err;
5517 + }
5518 +
5519 + return 0;
5520 +}
5521 +
5522 +static int del_cls(struct net_device *net_dev, int location);
5523 +
5524 +static int do_cls(struct net_device *net_dev,
5525 + struct ethtool_rx_flow_spec *fs,
5526 + bool add)
5527 +{
5528 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5529 + struct device *dev = net_dev->dev.parent;
5530 + const int rule_cnt = dpaa2_eth_fs_count(priv);
5531 + struct dpni_rule_cfg rule_cfg;
5532 + struct dpni_fs_action_cfg fs_act = { 0 };
5533 + void *dma_mem;
5534 + int err = 0;
5535 +
5536 + if (!dpaa2_eth_fs_enabled(priv)) {
5537 + netdev_err(net_dev, "dev does not support steering!\n");
5538 + /* dev doesn't support steering */
5539 + return -EOPNOTSUPP;
5540 + }
5541 +
5542 + if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
5543 + fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
5544 + fs->location >= rule_cnt)
5545 + return -EINVAL;
5546 +
5547 +	/* When adding a new rule, check if the location is available,
5548 +	 * and if not, free the existing table entry before inserting
5549 +	 * the new one
5550 +	 */
5551 + if (add && (priv->cls_rule[fs->location].in_use == true))
5552 + del_cls(net_dev, fs->location);
5553 +
5554 + memset(&rule_cfg, 0, sizeof(rule_cfg));
5555 + rule_cfg.key_size = cls_key_size(priv);
5556 +
5557 + /* allocate twice the key size, for the actual key and for mask */
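+	/* Buffer layout, as filled in by prep_cls_rule() and passed to MC:
+	 *   dma_mem[0 .. key_size-1]            key
+	 *   dma_mem[key_size .. 2*key_size-1]   mask
+	 * mask_iova is therefore key_iova + key_size (set further down).
+	 */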
5558 + dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
5559 + if (!dma_mem)
5560 + return -ENOMEM;
5561 +
5562 + err = prep_cls_rule(net_dev, fs, dma_mem);
5563 + if (err)
5564 + goto err_free_mem;
5565 +
5566 + rule_cfg.key_iova = dma_map_single(dev, dma_mem,
5567 + rule_cfg.key_size * 2,
5568 + DMA_TO_DEVICE);
5569 +
5570 + rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
5571 +
5572 + if (fs->ring_cookie == RX_CLS_FLOW_DISC)
5573 + fs_act.options |= DPNI_FS_OPT_DISCARD;
5574 + else
5575 + fs_act.flow_id = fs->ring_cookie;
5576 +
5577 + if (add)
5578 + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
5579 + 0, fs->location, &rule_cfg, &fs_act);
5580 + else
5581 + err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token,
5582 + 0, &rule_cfg);
5583 +
5584 + dma_unmap_single(dev, rule_cfg.key_iova,
5585 + rule_cfg.key_size * 2, DMA_TO_DEVICE);
5586 +
5587 + if (err)
5588 + netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
5589 +
5590 +err_free_mem:
5591 + kfree(dma_mem);
5592 +
5593 + return err;
5594 +}
5595 +
5596 +static int add_cls(struct net_device *net_dev,
5597 + struct ethtool_rx_flow_spec *fs)
5598 +{
5599 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5600 + int err;
5601 +
5602 + err = do_cls(net_dev, fs, true);
5603 + if (err)
5604 + return err;
5605 +
5606 + priv->cls_rule[fs->location].in_use = true;
5607 + priv->cls_rule[fs->location].fs = *fs;
5608 +
5609 + return 0;
5610 +}
5611 +
5612 +static int del_cls(struct net_device *net_dev, int location)
5613 +{
5614 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5615 + int err;
5616 +
5617 + err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
5618 + if (err)
5619 + return err;
5620 +
5621 + priv->cls_rule[location].in_use = false;
5622 +
5623 + return 0;
5624 +}
5625 +
5626 +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
5627 + struct ethtool_rxnfc *rxnfc)
5628 +{
5629 + int err = 0;
5630 +
5631 + switch (rxnfc->cmd) {
5632 + case ETHTOOL_SRXCLSRLINS:
5633 + err = add_cls(net_dev, &rxnfc->fs);
5634 + break;
5635 +
5636 + case ETHTOOL_SRXCLSRLDEL:
5637 + err = del_cls(net_dev, rxnfc->fs.location);
5638 + break;
5639 +
5640 + default:
5641 + err = -EOPNOTSUPP;
5642 + }
5643 +
5644 + return err;
5645 +}
5646 +
5647 +static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
5648 + struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
5649 +{
5650 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5651 + const int rule_cnt = dpaa2_eth_fs_count(priv);
5652 + int i, j;
5653 +
5654 + switch (rxnfc->cmd) {
5655 + case ETHTOOL_GRXFH:
5656 + /* we purposely ignore cmd->flow_type, because the hashing key
5657 + * is the same (and fixed) for all protocols
5658 + */
5659 + rxnfc->data = priv->rx_flow_hash;
5660 + break;
5661 +
5662 + case ETHTOOL_GRXRINGS:
5663 + rxnfc->data = dpaa2_eth_queue_count(priv);
5664 + break;
5665 +
5666 + case ETHTOOL_GRXCLSRLCNT:
5667 + for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
5668 + if (priv->cls_rule[i].in_use)
5669 + rxnfc->rule_cnt++;
5670 + rxnfc->data = rule_cnt;
5671 + break;
5672 +
5673 + case ETHTOOL_GRXCLSRULE:
5674 + if (!priv->cls_rule[rxnfc->fs.location].in_use)
5675 + return -EINVAL;
5676 +
5677 + rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
5678 + break;
5679 +
5680 + case ETHTOOL_GRXCLSRLALL:
5681 + for (i = 0, j = 0; i < rule_cnt; i++) {
5682 + if (!priv->cls_rule[i].in_use)
5683 + continue;
5684 + if (j == rxnfc->rule_cnt)
5685 + return -EMSGSIZE;
5686 + rule_locs[j++] = i;
5687 + }
5688 + rxnfc->rule_cnt = j;
5689 + rxnfc->data = rule_cnt;
5690 + break;
5691 +
5692 + default:
5693 + return -EOPNOTSUPP;
5694 + }
5695 +
5696 + return 0;
5697 +}
5698 +
5699 +const struct ethtool_ops dpaa2_ethtool_ops = {
5700 + .get_drvinfo = dpaa2_eth_get_drvinfo,
5701 + .get_link = ethtool_op_get_link,
5702 + .get_settings = dpaa2_eth_get_settings,
5703 + .set_settings = dpaa2_eth_set_settings,
5704 + .get_pauseparam = dpaa2_eth_get_pauseparam,
5705 + .set_pauseparam = dpaa2_eth_set_pauseparam,
5706 + .get_sset_count = dpaa2_eth_get_sset_count,
5707 + .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
5708 + .get_strings = dpaa2_eth_get_strings,
5709 + .get_rxnfc = dpaa2_eth_get_rxnfc,
5710 + .set_rxnfc = dpaa2_eth_set_rxnfc,
5711 +};
5712 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
5713 new file mode 100644
5714 index 00000000..02290a08
5715 --- /dev/null
5716 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
5717 @@ -0,0 +1,176 @@
5718 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
5719 + *
5720 + * Redistribution and use in source and binary forms, with or without
5721 + * modification, are permitted provided that the following conditions are met:
5722 + * * Redistributions of source code must retain the above copyright
5723 + * notice, this list of conditions and the following disclaimer.
5724 + * * Redistributions in binary form must reproduce the above copyright
5725 + * notice, this list of conditions and the following disclaimer in the
5726 + * documentation and/or other materials provided with the distribution.
5727 + * * Neither the name of the above-listed copyright holders nor the
5728 + * names of any contributors may be used to endorse or promote products
5729 + * derived from this software without specific prior written permission.
5730 + *
5731 + *
5732 + * ALTERNATIVELY, this software may be distributed under the terms of the
5733 + * GNU General Public License ("GPL") as published by the Free Software
5734 + * Foundation, either version 2 of that License or (at your option) any
5735 + * later version.
5736 + *
5737 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
5738 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5739 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5740 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
5741 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
5742 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
5743 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
5744 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
5745 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
5746 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
5747 + * POSSIBILITY OF SUCH DAMAGE.
5748 + */
5749 +#ifndef __FSL_DPKG_H_
5750 +#define __FSL_DPKG_H_
5751 +
5752 +#include <linux/types.h>
5753 +#include "net.h"
5754 +
5755 +/* Data Path Key Generator API
5756 + * Contains initialization APIs and runtime APIs for the Key Generator
5757 + */
5758 +
5759 +/** Key Generator properties */
5760 +
5761 +/**
5762 + * Number of masks per key extraction
5763 + */
5764 +#define DPKG_NUM_OF_MASKS 4
5765 +/**
5766 + * Number of extractions per key profile
5767 + */
5768 +#define DPKG_MAX_NUM_OF_EXTRACTS 10
5769 +
5770 +/**
5771 + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
5772 + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
5773 + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
5774 + * @DPKG_FULL_FIELD: Extract a full field
5775 + */
5776 +enum dpkg_extract_from_hdr_type {
5777 + DPKG_FROM_HDR = 0,
5778 + DPKG_FROM_FIELD = 1,
5779 + DPKG_FULL_FIELD = 2
5780 +};
5781 +
5782 +/**
5783 + * enum dpkg_extract_type - Enumeration for selecting extraction type
5784 + * @DPKG_EXTRACT_FROM_HDR: Extract from the header
5785 + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
5786 + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
5787 + * e.g. can be used to extract header existence;
5788 + * please refer to 'Parse Result definition' section in the parser BG
5789 + */
5790 +enum dpkg_extract_type {
5791 + DPKG_EXTRACT_FROM_HDR = 0,
5792 + DPKG_EXTRACT_FROM_DATA = 1,
5793 + DPKG_EXTRACT_FROM_PARSE = 3
5794 +};
5795 +
5796 +/**
5797 + * struct dpkg_mask - A structure for defining a single extraction mask
5798 + * @mask: Byte mask for the extracted content
5799 + * @offset: Offset within the extracted content
5800 + */
5801 +struct dpkg_mask {
5802 + u8 mask;
5803 + u8 offset;
5804 +};
5805 +
5806 +/**
5807 + * struct dpkg_extract - A structure for defining a single extraction
5808 + * @type: Determines how the union below is interpreted:
5809 + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
5810 + * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
5811 + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
5812 + * @extract: Selects extraction method
5813 + * @num_of_byte_masks: Defines the number of valid entries in the array below;
5814 + * This is also the number of bytes to be used as masks
5815 + * @masks: Masks parameters
5816 + */
5817 +struct dpkg_extract {
5818 + enum dpkg_extract_type type;
5819 + /**
5820 + * union extract - Selects extraction method
5821 + * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
5822 + * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
5823 + * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
5824 + */
5825 + union {
5826 + /**
5827 + * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
5828 + * @prot: Any of the supported headers
5829 + * @type: Defines the type of header extraction:
5830 + * DPKG_FROM_HDR: use size & offset below;
5831 + * DPKG_FROM_FIELD: use field, size and offset below;
5832 + * DPKG_FULL_FIELD: use field below
5833 + * @field: One of the supported fields (NH_FLD_)
5834 + *
5835 + * @size: Size in bytes
5836 + * @offset: Byte offset
5837 + * @hdr_index: Clear for cases not listed below;
5838 + * Used for protocols that may have more than a single
5839 + * header, 0 indicates an outer header;
5840 + * Supported protocols (possible values):
5841 + * NET_PROT_VLAN (0, HDR_INDEX_LAST);
5842 + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
5843 + * NET_PROT_IP(0, HDR_INDEX_LAST);
5844 + * NET_PROT_IPv4(0, HDR_INDEX_LAST);
5845 + * NET_PROT_IPv6(0, HDR_INDEX_LAST);
5846 + */
5847 +
5848 + struct {
5849 + enum net_prot prot;
5850 + enum dpkg_extract_from_hdr_type type;
5851 + u32 field;
5852 + u8 size;
5853 + u8 offset;
5854 + u8 hdr_index;
5855 + } from_hdr;
5856 + /**
5857 + * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
5858 + * @size: Size in bytes
5859 + * @offset: Byte offset
5860 + */
5861 + struct {
5862 + u8 size;
5863 + u8 offset;
5864 + } from_data;
5865 +
5866 + /**
5867 + * struct from_parse - Used when
5868 + * 'type = DPKG_EXTRACT_FROM_PARSE'
5869 + * @size: Size in bytes
5870 + * @offset: Byte offset
5871 + */
5872 + struct {
5873 + u8 size;
5874 + u8 offset;
5875 + } from_parse;
5876 + } extract;
5877 +
5878 + u8 num_of_byte_masks;
5879 + struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
5880 +};
5881 +
5882 +/**
5883 + * struct dpkg_profile_cfg - A structure for defining a full Key Generation
5884 + * profile (rule)
5885 + * @num_extracts: Defines the number of valid entries in the array below
5886 + * @extracts: Array of required extractions
5887 + */
5888 +struct dpkg_profile_cfg {
5889 + u8 num_extracts;
5890 + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
5891 +};
5892 +
5893 +#endif /* __FSL_DPKG_H_ */
5894 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
5895 new file mode 100644
5896 index 00000000..fa353d75
5897 --- /dev/null
5898 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
5899 @@ -0,0 +1,600 @@
5900 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
5901 + * Copyright 2016 NXP
5902 + *
5903 + * Redistribution and use in source and binary forms, with or without
5904 + * modification, are permitted provided that the following conditions are met:
5905 + * * Redistributions of source code must retain the above copyright
5906 + * notice, this list of conditions and the following disclaimer.
5907 + * * Redistributions in binary form must reproduce the above copyright
5908 + * notice, this list of conditions and the following disclaimer in the
5909 + * documentation and/or other materials provided with the distribution.
5910 + * * Neither the name of the above-listed copyright holders nor the
5911 + * names of any contributors may be used to endorse or promote products
5912 + * derived from this software without specific prior written permission.
5913 + *
5914 + *
5915 + * ALTERNATIVELY, this software may be distributed under the terms of the
5916 + * GNU General Public License ("GPL") as published by the Free Software
5917 + * Foundation, either version 2 of that License or (at your option) any
5918 + * later version.
5919 + *
5920 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
5921 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5922 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5923 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
5924 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
5925 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
5926 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
5927 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
5928 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
5929 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
5930 + * POSSIBILITY OF SUCH DAMAGE.
5931 + */
5932 +#ifndef _FSL_DPNI_CMD_H
5933 +#define _FSL_DPNI_CMD_H
5934 +
5935 +/* DPNI Version */
5936 +#define DPNI_VER_MAJOR 7
5937 +#define DPNI_VER_MINOR 0
5938 +#define DPNI_CMD_BASE_VERSION 1
5939 +#define DPNI_CMD_ID_OFFSET 4
5940 +
5941 +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
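+/* For example, DPNI_CMDID_OPEN below evaluates to (0x801 << 4) | 1 = 0x8011:
+ * the upper bits carry the command ID and the low 4 bits the command version.
+ */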
5942 +
5943 +#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
5944 +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
5945 +#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
5946 +#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
5947 +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
5948 +
5949 +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
5950 +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
5951 +#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
5952 +#define DPNI_CMDID_RESET DPNI_CMD(0x005)
5953 +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
5954 +
5955 +#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
5956 +#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
5957 +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
5958 +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
5959 +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
5960 +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
5961 +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
5962 +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
5963 +
5964 +#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
5965 +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
5966 +
5967 +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
5968 +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
5969 +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
5970 +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
5971 +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
5972 +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
5973 +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
5974 +
5975 +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
5976 +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
5977 +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
5978 +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
5979 +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
5980 +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
5981 +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
5982 +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
5983 +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
5984 +
5985 +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
5986 +
5987 +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
5988 +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
5989 +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
5990 +
5991 +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
5992 +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
5993 +#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
5994 +#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
5995 +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
5996 +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
5997 +
5998 +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
5999 +
6000 +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
6001 +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
6002 +
6003 +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
6004 +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
6005 +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
6006 +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
6007 +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
6008 +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
6009 +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
6010 +
6011 +/* Macros for accessing command fields smaller than 1 byte */
6012 +#define DPNI_MASK(field) \
6013 + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
6014 + DPNI_##field##_SHIFT)
6015 +
6016 +#define dpni_set_field(var, field, val) \
6017 + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
6018 +#define dpni_get_field(var, field) \
6019 + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
6020 +
6021 +struct dpni_cmd_open {
6022 + __le32 dpni_id;
6023 +};
6024 +
6025 +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
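+/* Bit i of backup_pool_mask is set when pool i is a backup pool; the mask is
+ * built by OR-ing DPNI_BACKUP_POOL(backup_pool, i) for each configured pool,
+ * as done in dpni_set_pools().
+ */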
6026 +struct dpni_cmd_set_pools {
6027 + /* cmd word 0 */
6028 + u8 num_dpbp;
6029 + u8 backup_pool_mask;
6030 + __le16 pad;
6031 + /* cmd word 0..4 */
6032 + __le32 dpbp_id[DPNI_MAX_DPBP];
6033 + /* cmd word 4..6 */
6034 + __le16 buffer_size[DPNI_MAX_DPBP];
6035 +};
6036 +
6037 +/* The enable indication is always the least significant bit */
6038 +#define DPNI_ENABLE_SHIFT 0
6039 +#define DPNI_ENABLE_SIZE 1
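+
+/* Illustrative expansion: with the definitions above,
+ * dpni_set_field(var, ENABLE, 1) becomes
+ *   var |= ((1 << DPNI_ENABLE_SHIFT) & GENMASK(0, 0));
+ * i.e. only the least significant bit of 'var' is modified, and
+ * dpni_get_field(var, ENABLE) reads that same bit back.
+ */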
6040 +
6041 +struct dpni_rsp_is_enabled {
6042 + u8 enabled;
6043 +};
6044 +
6045 +struct dpni_rsp_get_irq {
6046 + /* response word 0 */
6047 + __le32 irq_val;
6048 + __le32 pad;
6049 + /* response word 1 */
6050 + __le64 irq_addr;
6051 + /* response word 2 */
6052 + __le32 irq_num;
6053 + __le32 type;
6054 +};
6055 +
6056 +struct dpni_cmd_set_irq_enable {
6057 + u8 enable;
6058 + u8 pad[3];
6059 + u8 irq_index;
6060 +};
6061 +
6062 +struct dpni_cmd_get_irq_enable {
6063 + __le32 pad;
6064 + u8 irq_index;
6065 +};
6066 +
6067 +struct dpni_rsp_get_irq_enable {
6068 + u8 enabled;
6069 +};
6070 +
6071 +struct dpni_cmd_set_irq_mask {
6072 + __le32 mask;
6073 + u8 irq_index;
6074 +};
6075 +
6076 +struct dpni_cmd_get_irq_mask {
6077 + __le32 pad;
6078 + u8 irq_index;
6079 +};
6080 +
6081 +struct dpni_rsp_get_irq_mask {
6082 + __le32 mask;
6083 +};
6084 +
6085 +struct dpni_cmd_get_irq_status {
6086 + __le32 status;
6087 + u8 irq_index;
6088 +};
6089 +
6090 +struct dpni_rsp_get_irq_status {
6091 + __le32 status;
6092 +};
6093 +
6094 +struct dpni_cmd_clear_irq_status {
6095 + __le32 status;
6096 + u8 irq_index;
6097 +};
6098 +
6099 +struct dpni_rsp_get_attr {
6100 + /* response word 0 */
6101 + __le32 options;
6102 + u8 num_queues;
6103 + u8 num_tcs;
6104 + u8 mac_filter_entries;
6105 + u8 pad0;
6106 + /* response word 1 */
6107 + u8 vlan_filter_entries;
6108 + u8 pad1;
6109 + u8 qos_entries;
6110 + u8 pad2;
6111 + __le16 fs_entries;
6112 + __le16 pad3;
6113 + /* response word 2 */
6114 + u8 qos_key_size;
6115 + u8 fs_key_size;
6116 + __le16 wriop_version;
6117 +};
6118 +
6119 +#define DPNI_ERROR_ACTION_SHIFT 0
6120 +#define DPNI_ERROR_ACTION_SIZE 4
6121 +#define DPNI_FRAME_ANN_SHIFT 4
6122 +#define DPNI_FRAME_ANN_SIZE 1
6123 +
6124 +struct dpni_cmd_set_errors_behavior {
6125 + __le32 errors;
6126 + /* from least significant bit: error_action:4, set_frame_annotation:1 */
6127 + u8 flags;
6128 +};
6129 +
6130 +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
6131 + * buffer layouts, but they all share the same parameters.
6132 + * If one of them changes, the structure below needs to be split.
6133 + */
6134 +
6135 +#define DPNI_PASS_TS_SHIFT 0
6136 +#define DPNI_PASS_TS_SIZE 1
6137 +#define DPNI_PASS_PR_SHIFT 1
6138 +#define DPNI_PASS_PR_SIZE 1
6139 +#define DPNI_PASS_FS_SHIFT 2
6140 +#define DPNI_PASS_FS_SIZE 1
6141 +
6142 +struct dpni_cmd_get_buffer_layout {
6143 + u8 qtype;
6144 +};
6145 +
6146 +struct dpni_rsp_get_buffer_layout {
6147 + /* response word 0 */
6148 + u8 pad0[6];
6149 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
6150 + u8 flags;
6151 + u8 pad1;
6152 + /* response word 1 */
6153 + __le16 private_data_size;
6154 + __le16 data_align;
6155 + __le16 head_room;
6156 + __le16 tail_room;
6157 +};
6158 +
6159 +struct dpni_cmd_set_buffer_layout {
6160 + /* cmd word 0 */
6161 + u8 qtype;
6162 + u8 pad0[3];
6163 + __le16 options;
6164 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
6165 + u8 flags;
6166 + u8 pad1;
6167 + /* cmd word 1 */
6168 + __le16 private_data_size;
6169 + __le16 data_align;
6170 + __le16 head_room;
6171 + __le16 tail_room;
6172 +};
6173 +
6174 +struct dpni_cmd_set_offload {
6175 + u8 pad[3];
6176 + u8 dpni_offload;
6177 + __le32 config;
6178 +};
6179 +
6180 +struct dpni_cmd_get_offload {
6181 + u8 pad[3];
6182 + u8 dpni_offload;
6183 +};
6184 +
6185 +struct dpni_rsp_get_offload {
6186 + __le32 pad;
6187 + __le32 config;
6188 +};
6189 +
6190 +struct dpni_cmd_get_qdid {
6191 + u8 qtype;
6192 +};
6193 +
6194 +struct dpni_rsp_get_qdid {
6195 + __le16 qdid;
6196 +};
6197 +
6198 +struct dpni_rsp_get_tx_data_offset {
6199 + __le16 data_offset;
6200 +};
6201 +
6202 +struct dpni_cmd_get_statistics {
6203 + u8 page_number;
6204 +};
6205 +
6206 +struct dpni_rsp_get_statistics {
6207 + __le64 counter[DPNI_STATISTICS_CNT];
6208 +};
6209 +
6210 +struct dpni_cmd_set_link_cfg {
6211 + /* cmd word 0 */
6212 + __le64 pad0;
6213 + /* cmd word 1 */
6214 + __le32 rate;
6215 + __le32 pad1;
6216 + /* cmd word 2 */
6217 + __le64 options;
6218 +};
6219 +
6220 +#define DPNI_LINK_STATE_SHIFT 0
6221 +#define DPNI_LINK_STATE_SIZE 1
6222 +
6223 +struct dpni_rsp_get_link_state {
6224 + /* response word 0 */
6225 + __le32 pad0;
6226 + /* from LSB: up:1 */
6227 + u8 flags;
6228 + u8 pad1[3];
6229 + /* response word 1 */
6230 + __le32 rate;
6231 + __le32 pad2;
6232 + /* response word 2 */
6233 + __le64 options;
6234 +};
6235 +
6236 +struct dpni_cmd_set_tx_shaping {
6237 + /* cmd word 0 */
6238 + __le16 max_burst_size;
6239 + __le16 pad0[3];
6240 + /* cmd word 1 */
6241 + __le32 rate_limit;
6242 +};
6243 +
6244 +struct dpni_cmd_set_max_frame_length {
6245 + __le16 max_frame_length;
6246 +};
6247 +
6248 +struct dpni_rsp_get_max_frame_length {
6249 + __le16 max_frame_length;
6250 +};
6251 +
6252 +struct dpni_cmd_set_multicast_promisc {
6253 + u8 enable;
6254 +};
6255 +
6256 +struct dpni_rsp_get_multicast_promisc {
6257 + u8 enabled;
6258 +};
6259 +
6260 +struct dpni_cmd_set_unicast_promisc {
6261 + u8 enable;
6262 +};
6263 +
6264 +struct dpni_rsp_get_unicast_promisc {
6265 + u8 enabled;
6266 +};
6267 +
6268 +struct dpni_cmd_set_primary_mac_addr {
6269 + __le16 pad;
6270 + u8 mac_addr[6];
6271 +};
6272 +
6273 +struct dpni_rsp_get_primary_mac_addr {
6274 + __le16 pad;
6275 + u8 mac_addr[6];
6276 +};
6277 +
6278 +struct dpni_rsp_get_port_mac_addr {
6279 + __le16 pad;
6280 + u8 mac_addr[6];
6281 +};
6282 +
6283 +struct dpni_cmd_add_mac_addr {
6284 + __le16 pad;
6285 + u8 mac_addr[6];
6286 +};
6287 +
6288 +struct dpni_cmd_remove_mac_addr {
6289 + __le16 pad;
6290 + u8 mac_addr[6];
6291 +};
6292 +
6293 +#define DPNI_UNICAST_FILTERS_SHIFT 0
6294 +#define DPNI_UNICAST_FILTERS_SIZE 1
6295 +#define DPNI_MULTICAST_FILTERS_SHIFT 1
6296 +#define DPNI_MULTICAST_FILTERS_SIZE 1
6297 +
6298 +struct dpni_cmd_clear_mac_filters {
6299 + /* from LSB: unicast:1, multicast:1 */
6300 + u8 flags;
6301 +};
6302 +
6303 +#define DPNI_DIST_MODE_SHIFT 0
6304 +#define DPNI_DIST_MODE_SIZE 4
6305 +#define DPNI_MISS_ACTION_SHIFT 4
6306 +#define DPNI_MISS_ACTION_SIZE 4
6307 +
6308 +struct dpni_cmd_set_rx_tc_dist {
6309 + /* cmd word 0 */
6310 + __le16 dist_size;
6311 + u8 tc_id;
6312 + /* from LSB: dist_mode:4, miss_action:4 */
6313 + u8 flags;
6314 + __le16 pad0;
6315 + __le16 default_flow_id;
6316 + /* cmd word 1..5 */
6317 + __le64 pad1[5];
6318 + /* cmd word 6 */
6319 + __le64 key_cfg_iova;
6320 +};
6321 +
6322 +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
6323 + * key_cfg_iova)
6324 + */
6325 +struct dpni_mask_cfg {
6326 + u8 mask;
6327 + u8 offset;
6328 +};
6329 +
6330 +#define DPNI_EFH_TYPE_SHIFT 0
6331 +#define DPNI_EFH_TYPE_SIZE 4
6332 +#define DPNI_EXTRACT_TYPE_SHIFT 0
6333 +#define DPNI_EXTRACT_TYPE_SIZE 4
6334 +
6335 +struct dpni_dist_extract {
6336 + /* word 0 */
6337 + u8 prot;
6338 + /* EFH type stored in the 4 least significant bits */
6339 + u8 efh_type;
6340 + u8 size;
6341 + u8 offset;
6342 + __le32 field;
6343 + /* word 1 */
6344 + u8 hdr_index;
6345 + u8 constant;
6346 + u8 num_of_repeats;
6347 + u8 num_of_byte_masks;
6348 + /* Extraction type is stored in the 4 LSBs */
6349 + u8 extract_type;
6350 + u8 pad[3];
6351 + /* word 2 */
6352 + struct dpni_mask_cfg masks[4];
6353 +};
6354 +
6355 +struct dpni_ext_set_rx_tc_dist {
6356 + /* extension word 0 */
6357 + u8 num_extracts;
6358 + u8 pad[7];
6359 + /* words 1..25 */
6360 + struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
6361 +};
6362 +
6363 +struct dpni_cmd_get_queue {
6364 + u8 qtype;
6365 + u8 tc;
6366 + u8 index;
6367 +};
6368 +
6369 +#define DPNI_DEST_TYPE_SHIFT 0
6370 +#define DPNI_DEST_TYPE_SIZE 4
6371 +#define DPNI_STASH_CTRL_SHIFT 6
6372 +#define DPNI_STASH_CTRL_SIZE 1
6373 +#define DPNI_HOLD_ACTIVE_SHIFT 7
6374 +#define DPNI_HOLD_ACTIVE_SIZE 1
6375 +
6376 +struct dpni_rsp_get_queue {
6377 + /* response word 0 */
6378 + __le64 pad0;
6379 + /* response word 1 */
6380 + __le32 dest_id;
6381 + __le16 pad1;
6382 + u8 dest_prio;
6383 + /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
6384 + u8 flags;
6385 + /* response word 2 */
6386 + __le64 flc;
6387 + /* response word 3 */
6388 + __le64 user_context;
6389 + /* response word 4 */
6390 + __le32 fqid;
6391 + __le16 qdbin;
6392 +};
6393 +
6394 +struct dpni_cmd_set_queue {
6395 + /* cmd word 0 */
6396 + u8 qtype;
6397 + u8 tc;
6398 + u8 index;
6399 + u8 options;
6400 + __le32 pad0;
6401 + /* cmd word 1 */
6402 + __le32 dest_id;
6403 + __le16 pad1;
6404 + u8 dest_prio;
6405 + u8 flags;
6406 + /* cmd word 2 */
6407 + __le64 flc;
6408 + /* cmd word 3 */
6409 + __le64 user_context;
6410 +};
6411 +
6412 +struct dpni_cmd_add_fs_entry {
6413 + /* cmd word 0 */
6414 + u16 options;
6415 + u8 tc_id;
6416 + u8 key_size;
6417 + u16 index;
6418 + u16 flow_id;
6419 + /* cmd word 1 */
6420 + u64 key_iova;
6421 + /* cmd word 2 */
6422 + u64 mask_iova;
6423 + /* cmd word 3 */
6424 + u64 flc;
6425 +};
6426 +
6427 +struct dpni_cmd_remove_fs_entry {
6428 + /* cmd word 0 */
6429 + __le16 pad0;
6430 + u8 tc_id;
6431 + u8 key_size;
6432 + __le32 pad1;
6433 + /* cmd word 1 */
6434 + u64 key_iova;
6435 + /* cmd word 2 */
6436 + u64 mask_iova;
6437 +};
6438 +
6439 +struct dpni_cmd_set_taildrop {
6440 + /* cmd word 0 */
6441 + u8 congestion_point;
6442 + u8 qtype;
6443 + u8 tc;
6444 + u8 index;
6445 + __le32 pad0;
6446 + /* cmd word 1 */
6447 + /* Only least significant bit is relevant */
6448 + u8 enable;
6449 + u8 pad1;
6450 + u8 units;
6451 + u8 pad2;
6452 + __le32 threshold;
6453 +};
6454 +
6455 +struct dpni_cmd_get_taildrop {
6456 + u8 congestion_point;
6457 + u8 qtype;
6458 + u8 tc;
6459 + u8 index;
6460 +};
6461 +
6462 +struct dpni_rsp_get_taildrop {
6463 + /* cmd word 0 */
6464 + __le64 pad0;
6465 + /* cmd word 1 */
6466 + /* only least significant bit is relevant */
6467 + u8 enable;
6468 + u8 pad1;
6469 + u8 units;
6470 + u8 pad2;
6471 + __le32 threshold;
6472 +};
6473 +
6474 +#define DPNI_DEST_TYPE_SHIFT 0
6475 +#define DPNI_DEST_TYPE_SIZE 4
6476 +#define DPNI_CONG_UNITS_SHIFT 4
6477 +#define DPNI_CONG_UNITS_SIZE 2
6478 +
6479 +struct dpni_cmd_set_congestion_notification {
6480 + /* cmd word 0 */
6481 + u8 qtype;
6482 + u8 tc;
6483 + u8 pad[6];
6484 + /* cmd word 1 */
6485 + u32 dest_id;
6486 + u16 notification_mode;
6487 + u8 dest_priority;
6488 + /* from LSB: dest_type: 4 units:2 */
6489 + u8 type_units;
6490 + /* cmd word 2 */
6491 + u64 message_iova;
6492 + /* cmd word 3 */
6493 + u64 message_ctx;
6494 + /* cmd word 4 */
6495 + u32 threshold_entry;
6496 + u32 threshold_exit;
6497 +};
6498 +
6499 +#endif /* _FSL_DPNI_CMD_H */
6500 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
6501 new file mode 100644
6502 index 00000000..3c23e4dc
6503 --- /dev/null
6504 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
6505 @@ -0,0 +1,1770 @@
6506 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
6507 + * Copyright 2016 NXP
6508 + *
6509 + * Redistribution and use in source and binary forms, with or without
6510 + * modification, are permitted provided that the following conditions are met:
6511 + * * Redistributions of source code must retain the above copyright
6512 + * notice, this list of conditions and the following disclaimer.
6513 + * * Redistributions in binary form must reproduce the above copyright
6514 + * notice, this list of conditions and the following disclaimer in the
6515 + * documentation and/or other materials provided with the distribution.
6516 + * * Neither the name of the above-listed copyright holders nor the
6517 + * names of any contributors may be used to endorse or promote products
6518 + * derived from this software without specific prior written permission.
6519 + *
6520 + *
6521 + * ALTERNATIVELY, this software may be distributed under the terms of the
6522 + * GNU General Public License ("GPL") as published by the Free Software
6523 + * Foundation, either version 2 of that License or (at your option) any
6524 + * later version.
6525 + *
6526 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
6527 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
6528 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
6529 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
6530 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
6531 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
6532 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
6533 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
6534 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
6535 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6536 + * POSSIBILITY OF SUCH DAMAGE.
6537 + */
6538 +#include "../../fsl-mc/include/mc-sys.h"
6539 +#include "../../fsl-mc/include/mc-cmd.h"
6540 +#include "dpni.h"
6541 +#include "dpni-cmd.h"
6542 +
6543 +/**
6544 + * dpni_prepare_key_cfg() - prepare extract parameters
6545 + * @cfg: Key Generation profile (rule) definition
6546 + * @key_cfg_buf: Zeroed 256-byte buffer, to be mapped for DMA after preparation
6547 + *
6548 + * This function has to be called before the following functions:
6549 + * - dpni_set_rx_tc_dist()
6550 + * - dpni_set_qos_table()
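+ *
+ * A minimal usage sketch (illustrative only; DMA mapping and error handling
+ * are omitted):
+ *
+ *	struct dpkg_profile_cfg cfg = { .num_extracts = 1 };
+ *	u8 key_buf[256] = { 0 };
+ *	int err;
+ *
+ *	cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ *	cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
+ *	cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *	cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
+ *	err = dpni_prepare_key_cfg(&cfg, key_buf);
+ *
+ * key_buf is then DMA-mapped and its address provided as key_cfg_iova in the
+ * configuration passed to dpni_set_rx_tc_dist().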
6551 + */
6552 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
6553 +{
6554 + int i, j;
6555 + struct dpni_ext_set_rx_tc_dist *dpni_ext;
6556 + struct dpni_dist_extract *extr;
6557 +
6558 + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
6559 + return -EINVAL;
6560 +
6561 + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
6562 + dpni_ext->num_extracts = cfg->num_extracts;
6563 +
6564 + for (i = 0; i < cfg->num_extracts; i++) {
6565 + extr = &dpni_ext->extracts[i];
6566 +
6567 + switch (cfg->extracts[i].type) {
6568 + case DPKG_EXTRACT_FROM_HDR:
6569 + extr->prot = cfg->extracts[i].extract.from_hdr.prot;
6570 + dpni_set_field(extr->efh_type, EFH_TYPE,
6571 + cfg->extracts[i].extract.from_hdr.type);
6572 + extr->size = cfg->extracts[i].extract.from_hdr.size;
6573 + extr->offset = cfg->extracts[i].extract.from_hdr.offset;
6574 + extr->field = cpu_to_le32(
6575 + cfg->extracts[i].extract.from_hdr.field);
6576 + extr->hdr_index =
6577 + cfg->extracts[i].extract.from_hdr.hdr_index;
6578 + break;
6579 + case DPKG_EXTRACT_FROM_DATA:
6580 + extr->size = cfg->extracts[i].extract.from_data.size;
6581 + extr->offset =
6582 + cfg->extracts[i].extract.from_data.offset;
6583 + break;
6584 + case DPKG_EXTRACT_FROM_PARSE:
6585 + extr->size = cfg->extracts[i].extract.from_parse.size;
6586 + extr->offset =
6587 + cfg->extracts[i].extract.from_parse.offset;
6588 + break;
6589 + default:
6590 + return -EINVAL;
6591 + }
6592 +
6593 + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
6594 + dpni_set_field(extr->extract_type, EXTRACT_TYPE,
6595 + cfg->extracts[i].type);
6596 +
6597 + for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
6598 + extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
6599 + extr->masks[j].offset =
6600 + cfg->extracts[i].masks[j].offset;
6601 + }
6602 + }
6603 +
6604 + return 0;
6605 +}
6606 +
6607 +/**
6608 + * dpni_open() - Open a control session for the specified object
6609 + * @mc_io: Pointer to MC portal's I/O object
6610 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6611 + * @dpni_id: DPNI unique ID
6612 + * @token: Returned token; use in subsequent API calls
6613 + *
6614 + * This function can be used to open a control session for an
6615 + * already created object; an object may have been declared in
6616 + * the DPL or by calling the dpni_create() function.
6617 + * This function returns a unique authentication token,
6618 + * associated with the specific object ID and the specific MC
6619 + * portal; this token must be used in all subsequent commands for
6620 + * this specific object.
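+ *
+ * Typical call sequence (illustrative only; mc_io and dpni_id are assumed to
+ * be already known to the caller):
+ *
+ *	u16 token;
+ *	int err = dpni_open(mc_io, 0, dpni_id, &token);
+ *
+ *	if (!err) {
+ *		...use 'token' in subsequent dpni_* calls...
+ *		dpni_close(mc_io, 0, token);
+ *	}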
6621 + *
6622 + * Return: '0' on Success; Error code otherwise.
6623 + */
6624 +int dpni_open(struct fsl_mc_io *mc_io,
6625 + u32 cmd_flags,
6626 + int dpni_id,
6627 + u16 *token)
6628 +{
6629 + struct mc_command cmd = { 0 };
6630 + struct dpni_cmd_open *cmd_params;
6631 +
6632 + int err;
6633 +
6634 + /* prepare command */
6635 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
6636 + cmd_flags,
6637 + 0);
6638 + cmd_params = (struct dpni_cmd_open *)cmd.params;
6639 + cmd_params->dpni_id = cpu_to_le32(dpni_id);
6640 +
6641 + /* send command to mc*/
6642 + err = mc_send_command(mc_io, &cmd);
6643 + if (err)
6644 + return err;
6645 +
6646 + /* retrieve response parameters */
6647 + *token = mc_cmd_hdr_read_token(&cmd);
6648 +
6649 + return 0;
6650 +}
6651 +
6652 +/**
6653 + * dpni_close() - Close the control session of the object
6654 + * @mc_io: Pointer to MC portal's I/O object
6655 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6656 + * @token: Token of DPNI object
6657 + *
6658 + * After this function is called, no further operations are
6659 + * allowed on the object without opening a new control session.
6660 + *
6661 + * Return: '0' on Success; Error code otherwise.
6662 + */
6663 +int dpni_close(struct fsl_mc_io *mc_io,
6664 + u32 cmd_flags,
6665 + u16 token)
6666 +{
6667 + struct mc_command cmd = { 0 };
6668 +
6669 + /* prepare command */
6670 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
6671 + cmd_flags,
6672 + token);
6673 +
6674 + /* send command to mc*/
6675 + return mc_send_command(mc_io, &cmd);
6676 +}
6677 +
6678 +/**
6679 + * dpni_set_pools() - Set buffer pools configuration
6680 + * @mc_io: Pointer to MC portal's I/O object
6681 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6682 + * @token: Token of DPNI object
6683 + * @cfg: Buffer pools configuration
6684 + *
6685 + * Mandatory for DPNI operation.
6686 + * Warning: allowed only when the DPNI is disabled.
6687 + *
6688 + * Return: '0' on Success; Error code otherwise.
6689 + */
6690 +int dpni_set_pools(struct fsl_mc_io *mc_io,
6691 + u32 cmd_flags,
6692 + u16 token,
6693 + const struct dpni_pools_cfg *cfg)
6694 +{
6695 + struct mc_command cmd = { 0 };
6696 + struct dpni_cmd_set_pools *cmd_params;
6697 + int i;
6698 +
6699 + /* prepare command */
6700 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
6701 + cmd_flags,
6702 + token);
6703 + cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
6704 + cmd_params->num_dpbp = cfg->num_dpbp;
6705 + for (i = 0; i < DPNI_MAX_DPBP; i++) {
6706 + cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
6707 + cmd_params->buffer_size[i] =
6708 + cpu_to_le16(cfg->pools[i].buffer_size);
6709 + cmd_params->backup_pool_mask |=
6710 + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
6711 + }
6712 +
6713 + /* send command to mc*/
6714 + return mc_send_command(mc_io, &cmd);
6715 +}
6716 +
6717 +/**
6718 + * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
6719 + * @mc_io: Pointer to MC portal's I/O object
6720 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6721 + * @token: Token of DPNI object
6722 + *
6723 + * Return: '0' on Success; Error code otherwise.
6724 + */
6725 +int dpni_enable(struct fsl_mc_io *mc_io,
6726 + u32 cmd_flags,
6727 + u16 token)
6728 +{
6729 + struct mc_command cmd = { 0 };
6730 +
6731 + /* prepare command */
6732 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
6733 + cmd_flags,
6734 + token);
6735 +
6736 + /* send command to mc*/
6737 + return mc_send_command(mc_io, &cmd);
6738 +}
6739 +
6740 +/**
6741 + * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
6742 + * @mc_io: Pointer to MC portal's I/O object
6743 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6744 + * @token: Token of DPNI object
6745 + *
6746 + * Return: '0' on Success; Error code otherwise.
6747 + */
6748 +int dpni_disable(struct fsl_mc_io *mc_io,
6749 + u32 cmd_flags,
6750 + u16 token)
6751 +{
6752 + struct mc_command cmd = { 0 };
6753 +
6754 + /* prepare command */
6755 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
6756 + cmd_flags,
6757 + token);
6758 +
6759 + /* send command to mc*/
6760 + return mc_send_command(mc_io, &cmd);
6761 +}
6762 +
6763 +/**
6764 + * dpni_is_enabled() - Check if the DPNI is enabled.
6765 + * @mc_io: Pointer to MC portal's I/O object
6766 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6767 + * @token: Token of DPNI object
6768 + * @en: Returns '1' if object is enabled; '0' otherwise
6769 + *
6770 + * Return: '0' on Success; Error code otherwise.
6771 + */
6772 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
6773 + u32 cmd_flags,
6774 + u16 token,
6775 + int *en)
6776 +{
6777 + struct mc_command cmd = { 0 };
6778 + struct dpni_rsp_is_enabled *rsp_params;
6779 + int err;
6780 +
6781 + /* prepare command */
6782 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
6783 + cmd_flags,
6784 + token);
6785 +
6786 + /* send command to mc*/
6787 + err = mc_send_command(mc_io, &cmd);
6788 + if (err)
6789 + return err;
6790 +
6791 + /* retrieve response parameters */
6792 + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
6793 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
6794 +
6795 + return 0;
6796 +}
6797 +
6798 +/**
6799 + * dpni_reset() - Reset the DPNI, returns the object to initial state.
6800 + * @mc_io: Pointer to MC portal's I/O object
6801 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6802 + * @token: Token of DPNI object
6803 + *
6804 + * Return: '0' on Success; Error code otherwise.
6805 + */
6806 +int dpni_reset(struct fsl_mc_io *mc_io,
6807 + u32 cmd_flags,
6808 + u16 token)
6809 +{
6810 + struct mc_command cmd = { 0 };
6811 +
6812 + /* prepare command */
6813 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
6814 + cmd_flags,
6815 + token);
6816 +
6817 + /* send command to mc*/
6818 + return mc_send_command(mc_io, &cmd);
6819 +}
6820 +
6821 +/**
6822 + * dpni_set_irq_enable() - Set overall interrupt state.
6823 + * @mc_io: Pointer to MC portal's I/O object
6824 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6825 + * @token: Token of DPNI object
6826 + * @irq_index: The interrupt index to configure
6827 + * @en: Interrupt state: - enable = 1, disable = 0
6828 + *
6829 + * Allows GPP software to control when interrupts are generated.
6830 + * Each interrupt can have up to 32 causes. The enable/disable control affects
6831 + * the overall interrupt state: if the interrupt is disabled, none of the causes
6832 + * will raise an interrupt.
6833 + *
6834 + * Return: '0' on Success; Error code otherwise.
6835 + */
6836 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
6837 + u32 cmd_flags,
6838 + u16 token,
6839 + u8 irq_index,
6840 + u8 en)
6841 +{
6842 + struct mc_command cmd = { 0 };
6843 + struct dpni_cmd_set_irq_enable *cmd_params;
6844 +
6845 + /* prepare command */
6846 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
6847 + cmd_flags,
6848 + token);
6849 + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
6850 + dpni_set_field(cmd_params->enable, ENABLE, en);
6851 + cmd_params->irq_index = irq_index;
6852 +
6853 + /* send command to mc*/
6854 + return mc_send_command(mc_io, &cmd);
6855 +}
6856 +
6857 +/**
6858 + * dpni_get_irq_enable() - Get overall interrupt state
6859 + * @mc_io: Pointer to MC portal's I/O object
6860 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6861 + * @token: Token of DPNI object
6862 + * @irq_index: The interrupt index to configure
6863 + * @en: Returned interrupt state - enable = 1, disable = 0
6864 + *
6865 + * Return: '0' on Success; Error code otherwise.
6866 + */
6867 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
6868 + u32 cmd_flags,
6869 + u16 token,
6870 + u8 irq_index,
6871 + u8 *en)
6872 +{
6873 + struct mc_command cmd = { 0 };
6874 + struct dpni_cmd_get_irq_enable *cmd_params;
6875 + struct dpni_rsp_get_irq_enable *rsp_params;
6876 +
6877 + int err;
6878 +
6879 + /* prepare command */
6880 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
6881 + cmd_flags,
6882 + token);
6883 + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
6884 + cmd_params->irq_index = irq_index;
6885 +
6886 + /* send command to mc*/
6887 + err = mc_send_command(mc_io, &cmd);
6888 + if (err)
6889 + return err;
6890 +
6891 + /* retrieve response parameters */
6892 + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
6893 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
6894 +
6895 + return 0;
6896 +}
6897 +
6898 +/**
6899 + * dpni_set_irq_mask() - Set interrupt mask.
6900 + * @mc_io: Pointer to MC portal's I/O object
6901 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6902 + * @token: Token of DPNI object
6903 + * @irq_index: The interrupt index to configure
6904 + * @mask: event mask to trigger interrupt;
6905 + * each bit:
6906 + * 0 = ignore event
6907 + * 1 = consider event for asserting IRQ
6908 + *
6909 + * Every interrupt can have up to 32 causes and the interrupt model supports
6910 + * masking/unmasking each cause independently
6911 + *
6912 + * Return: '0' on Success; Error code otherwise.
6913 + */
6914 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
6915 + u32 cmd_flags,
6916 + u16 token,
6917 + u8 irq_index,
6918 + u32 mask)
6919 +{
6920 + struct mc_command cmd = { 0 };
6921 + struct dpni_cmd_set_irq_mask *cmd_params;
6922 +
6923 + /* prepare command */
6924 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
6925 + cmd_flags,
6926 + token);
6927 + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
6928 + cmd_params->mask = cpu_to_le32(mask);
6929 + cmd_params->irq_index = irq_index;
6930 +
6931 + /* send command to mc*/
6932 + return mc_send_command(mc_io, &cmd);
6933 +}
6934 +
6935 +/**
6936 + * dpni_get_irq_mask() - Get interrupt mask.
6937 + * @mc_io: Pointer to MC portal's I/O object
6938 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6939 + * @token: Token of DPNI object
6940 + * @irq_index: The interrupt index to configure
6941 + * @mask: Returned event mask to trigger interrupt
6942 + *
6943 + * Every interrupt can have up to 32 causes and the interrupt model supports
6944 + * masking/unmasking each cause independently
6945 + *
6946 + * Return: '0' on Success; Error code otherwise.
6947 + */
6948 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
6949 + u32 cmd_flags,
6950 + u16 token,
6951 + u8 irq_index,
6952 + u32 *mask)
6953 +{
6954 + struct mc_command cmd = { 0 };
6955 + struct dpni_cmd_get_irq_mask *cmd_params;
6956 + struct dpni_rsp_get_irq_mask *rsp_params;
6957 + int err;
6958 +
6959 + /* prepare command */
6960 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
6961 + cmd_flags,
6962 + token);
6963 + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
6964 + cmd_params->irq_index = irq_index;
6965 +
6966 + /* send command to mc*/
6967 + err = mc_send_command(mc_io, &cmd);
6968 + if (err)
6969 + return err;
6970 +
6971 + /* retrieve response parameters */
6972 + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
6973 + *mask = le32_to_cpu(rsp_params->mask);
6974 +
6975 + return 0;
6976 +}
6977 +
6978 +/**
6979 + * dpni_get_irq_status() - Get the current status of any pending interrupts.
6980 + * @mc_io: Pointer to MC portal's I/O object
6981 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6982 + * @token: Token of DPNI object
6983 + * @irq_index: The interrupt index to configure
6984 + * @status: Returned interrupts status - one bit per cause:
6985 + * 0 = no interrupt pending
6986 + * 1 = interrupt pending
6987 + *
6988 + * Return: '0' on Success; Error code otherwise.
6989 + */
6990 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
6991 + u32 cmd_flags,
6992 + u16 token,
6993 + u8 irq_index,
6994 + u32 *status)
6995 +{
6996 + struct mc_command cmd = { 0 };
6997 + struct dpni_cmd_get_irq_status *cmd_params;
6998 + struct dpni_rsp_get_irq_status *rsp_params;
6999 + int err;
7000 +
7001 + /* prepare command */
7002 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
7003 + cmd_flags,
7004 + token);
7005 + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
7006 + cmd_params->status = cpu_to_le32(*status);
7007 + cmd_params->irq_index = irq_index;
7008 +
7009 + /* send command to mc*/
7010 + err = mc_send_command(mc_io, &cmd);
7011 + if (err)
7012 + return err;
7013 +
7014 + /* retrieve response parameters */
7015 + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
7016 + *status = le32_to_cpu(rsp_params->status);
7017 +
7018 + return 0;
7019 +}
7020 +
7021 +/**
7022 + * dpni_clear_irq_status() - Clear a pending interrupt's status
7023 + * @mc_io: Pointer to MC portal's I/O object
7024 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7025 + * @token: Token of DPNI object
7026 + * @irq_index: The interrupt index to configure
7027 + * @status: bits to clear (W1C) - one bit per cause:
7028 + * 0 = don't change
7029 + * 1 = clear status bit
7030 + *
7031 + * Return: '0' on Success; Error code otherwise.
7032 + */
7033 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
7034 + u32 cmd_flags,
7035 + u16 token,
7036 + u8 irq_index,
7037 + u32 status)
7038 +{
7039 + struct mc_command cmd = { 0 };
7040 + struct dpni_cmd_clear_irq_status *cmd_params;
7041 +
7042 + /* prepare command */
7043 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
7044 + cmd_flags,
7045 + token);
7046 + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
7047 + cmd_params->irq_index = irq_index;
7048 + cmd_params->status = cpu_to_le32(status);
7049 +
7050 + /* send command to mc*/
7051 + return mc_send_command(mc_io, &cmd);
7052 +}
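The four IRQ helpers above are normally used together. A sketch (not part of the patch) of the usual sequence: unmask the link-change cause and enable the interrupt at probe time, then, in the interrupt handler, read the pending causes and acknowledge them write-1-to-clear. Note that dpni_get_irq_status() sends the caller-supplied *status to the MC as a mask of the causes of interest, hence the initialization below.

static int example_dpni_irq_arm(struct fsl_mc_io *mc_io, u16 token)
{
	int err;

	err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
				DPNI_IRQ_EVENT_LINK_CHANGED);
	if (err)
		return err;

	return dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
}

static int example_dpni_irq_service(struct fsl_mc_io *mc_io, u16 token)
{
	u32 status = DPNI_IRQ_EVENT_LINK_CHANGED;
	int err;

	err = dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status);
	if (err)
		return err;

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		/* react to the link change, then acknowledge it (W1C) */
		err = dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX,
					    DPNI_IRQ_EVENT_LINK_CHANGED);

	return err;
}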
7053 +
7054 +/**
7055 + * dpni_get_attributes() - Retrieve DPNI attributes.
7056 + * @mc_io: Pointer to MC portal's I/O object
7057 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7058 + * @token: Token of DPNI object
7059 + * @attr: Object's attributes
7060 + *
7061 + * Return: '0' on Success; Error code otherwise.
7062 + */
7063 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
7064 + u32 cmd_flags,
7065 + u16 token,
7066 + struct dpni_attr *attr)
7067 +{
7068 + struct mc_command cmd = { 0 };
7069 + struct dpni_rsp_get_attr *rsp_params;
7070 +
7071 + int err;
7072 +
7073 + /* prepare command */
7074 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
7075 + cmd_flags,
7076 + token);
7077 +
7078 + /* send command to mc*/
7079 + err = mc_send_command(mc_io, &cmd);
7080 + if (err)
7081 + return err;
7082 +
7083 + /* retrieve response parameters */
7084 + rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
7085 + attr->options = le32_to_cpu(rsp_params->options);
7086 + attr->num_queues = rsp_params->num_queues;
7087 + attr->num_tcs = rsp_params->num_tcs;
7088 + attr->mac_filter_entries = rsp_params->mac_filter_entries;
7089 + attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
7090 + attr->qos_entries = rsp_params->qos_entries;
7091 + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
7092 + attr->qos_key_size = rsp_params->qos_key_size;
7093 + attr->fs_key_size = rsp_params->fs_key_size;
7094 + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
7095 +
7096 + return 0;
7097 +}
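A sketch (not part of the patch) of how a caller reads these attributes, e.g. to size its per-queue and per-TC bookkeeping:

static int example_dpni_query_dims(struct fsl_mc_io *mc_io, u16 token,
				   u8 *num_queues, u8 *num_tcs)
{
	struct dpni_attr attr = { 0 };
	int err;

	err = dpni_get_attributes(mc_io, 0, token, &attr);
	if (err)
		return err;

	*num_queues = attr.num_queues;	/* queues used for traffic distribution */
	*num_tcs = attr.num_tcs;

	return 0;
}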
7098 +
7099 +/**
7100 + * dpni_set_errors_behavior() - Set errors behavior
7101 + * @mc_io: Pointer to MC portal's I/O object
7102 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7103 + * @token: Token of DPNI object
7104 + * @cfg: Errors configuration
7105 + *
7106 + * This function may be called numerous times with different
7107 + * error masks.
7108 + *
7109 + * Return: '0' on Success; Error code otherwise.
7110 + */
7111 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
7112 + u32 cmd_flags,
7113 + u16 token,
7114 + struct dpni_error_cfg *cfg)
7115 +{
7116 + struct mc_command cmd = { 0 };
7117 + struct dpni_cmd_set_errors_behavior *cmd_params;
7118 +
7119 + /* prepare command */
7120 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
7121 + cmd_flags,
7122 + token);
7123 + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
7124 + cmd_params->errors = cpu_to_le32(cfg->errors);
7125 + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
7126 + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
7127 +
7128 + /* send command to mc*/
7129 + return mc_send_command(mc_io, &cmd);
7130 +}
7131 +
7132 +/**
7133 + * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
7134 + * @mc_io: Pointer to MC portal's I/O object
7135 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7136 + * @token: Token of DPNI object
7137 + * @qtype: Type of queue to retrieve configuration for
7138 + * @layout: Returns buffer layout attributes
7139 + *
7140 + * Return: '0' on Success; Error code otherwise.
7141 + */
7142 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
7143 + u32 cmd_flags,
7144 + u16 token,
7145 + enum dpni_queue_type qtype,
7146 + struct dpni_buffer_layout *layout)
7147 +{
7148 + struct mc_command cmd = { 0 };
7149 + struct dpni_cmd_get_buffer_layout *cmd_params;
7150 + struct dpni_rsp_get_buffer_layout *rsp_params;
7151 + int err;
7152 +
7153 + /* prepare command */
7154 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
7155 + cmd_flags,
7156 + token);
7157 + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
7158 + cmd_params->qtype = qtype;
7159 +
7160 + /* send command to mc*/
7161 + err = mc_send_command(mc_io, &cmd);
7162 + if (err)
7163 + return err;
7164 +
7165 + /* retrieve response parameters */
7166 + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
7167 + layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
7168 + layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
7169 + layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
7170 + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
7171 + layout->data_align = le16_to_cpu(rsp_params->data_align);
7172 + layout->data_head_room = le16_to_cpu(rsp_params->head_room);
7173 + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
7174 +
7175 + return 0;
7176 +}
7177 +
7178 +/**
7179 + * dpni_set_buffer_layout() - Set buffer layout configuration.
7180 + * @mc_io: Pointer to MC portal's I/O object
7181 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7182 + * @token: Token of DPNI object
7183 + * @qtype: Type of queue this configuration applies to
7184 + * @layout: Buffer layout configuration
7185 + *
7186 + * Return: '0' on Success; Error code otherwise.
7187 + *
7188 + * @warning Allowed only when DPNI is disabled
7189 + */
7190 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
7191 + u32 cmd_flags,
7192 + u16 token,
7193 + enum dpni_queue_type qtype,
7194 + const struct dpni_buffer_layout *layout)
7195 +{
7196 + struct mc_command cmd = { 0 };
7197 + struct dpni_cmd_set_buffer_layout *cmd_params;
7198 +
7199 + /* prepare command */
7200 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
7201 + cmd_flags,
7202 + token);
7203 + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
7204 + cmd_params->qtype = qtype;
7205 + cmd_params->options = cpu_to_le16(layout->options);
7206 + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
7207 + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
7208 + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
7209 + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
7210 + cmd_params->data_align = cpu_to_le16(layout->data_align);
7211 + cmd_params->head_room = cpu_to_le16(layout->data_head_room);
7212 + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
7213 +
7214 + /* send command to mc*/
7215 + return mc_send_command(mc_io, &cmd);
7216 +}
7217 +
7218 +/**
7219 + * dpni_set_offload() - Set DPNI offload configuration.
7220 + * @mc_io: Pointer to MC portal's I/O object
7221 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7222 + * @token: Token of DPNI object
7223 + * @type: Type of DPNI offload
7224 + * @config: Offload configuration.
7225 + * For checksum offloads, non-zero value enables the offload
7226 + *
7227 + * Return: '0' on Success; Error code otherwise.
7228 + *
7229 + * @warning Allowed only when DPNI is disabled
7230 + */
7231 +
7232 +int dpni_set_offload(struct fsl_mc_io *mc_io,
7233 + u32 cmd_flags,
7234 + u16 token,
7235 + enum dpni_offload type,
7236 + u32 config)
7237 +{
7238 + struct mc_command cmd = { 0 };
7239 + struct dpni_cmd_set_offload *cmd_params;
7240 +
7241 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
7242 + cmd_flags,
7243 + token);
7244 + cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
7245 + cmd_params->dpni_offload = type;
7246 + cmd_params->config = cpu_to_le32(config);
7247 +
7248 + return mc_send_command(mc_io, &cmd);
7249 +}
7250 +
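dpni_get_offload() below is the only wrapper in this file without a kernel-doc header; a minimal one, mirroring dpni_set_offload() above, could read:

/**
 * dpni_get_offload() - Get DPNI offload configuration.
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @type:	Type of DPNI offload
 * @config:	Returned offload configuration.
 *		For checksum offloads, a non-zero value indicates the offload
 *		is enabled.
 *
 * Return:	'0' on Success; Error code otherwise.
 */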
7251 +int dpni_get_offload(struct fsl_mc_io *mc_io,
7252 + u32 cmd_flags,
7253 + u16 token,
7254 + enum dpni_offload type,
7255 + u32 *config)
7256 +{
7257 + struct mc_command cmd = { 0 };
7258 + struct dpni_cmd_get_offload *cmd_params;
7259 + struct dpni_rsp_get_offload *rsp_params;
7260 + int err;
7261 +
7262 + /* prepare command */
7263 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
7264 + cmd_flags,
7265 + token);
7266 + cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
7267 + cmd_params->dpni_offload = type;
7268 +
7269 + /* send command to mc*/
7270 + err = mc_send_command(mc_io, &cmd);
7271 + if (err)
7272 + return err;
7273 +
7274 + /* retrieve response parameters */
7275 + rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
7276 + *config = le32_to_cpu(rsp_params->config);
7277 +
7278 + return 0;
7279 +}
7280 +
7281 +/**
7282 + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
7283 + * for enqueue operations
7284 + * @mc_io: Pointer to MC portal's I/O object
7285 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7286 + * @token: Token of DPNI object
7287 + * @qtype: Type of queue to receive QDID for
7288 + * @qdid: Returned virtual QDID value that should be used as an argument
7289 + * in all enqueue operations
7290 + *
7291 + * Return: '0' on Success; Error code otherwise.
7292 + */
7293 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
7294 + u32 cmd_flags,
7295 + u16 token,
7296 + enum dpni_queue_type qtype,
7297 + u16 *qdid)
7298 +{
7299 + struct mc_command cmd = { 0 };
7300 + struct dpni_cmd_get_qdid *cmd_params;
7301 + struct dpni_rsp_get_qdid *rsp_params;
7302 + int err;
7303 +
7304 + /* prepare command */
7305 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
7306 + cmd_flags,
7307 + token);
7308 + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
7309 + cmd_params->qtype = qtype;
7310 +
7311 + /* send command to mc*/
7312 + err = mc_send_command(mc_io, &cmd);
7313 + if (err)
7314 + return err;
7315 +
7316 + /* retrieve response parameters */
7317 + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
7318 + *qdid = le16_to_cpu(rsp_params->qdid);
7319 +
7320 + return 0;
7321 +}
7322 +
7323 +/**
7324 + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
7325 + * @mc_io: Pointer to MC portal's I/O object
7326 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7327 + * @token: Token of DPNI object
7328 + * @data_offset: Tx data offset (from start of buffer)
7329 + *
7330 + * Return: '0' on Success; Error code otherwise.
7331 + */
7332 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
7333 + u32 cmd_flags,
7334 + u16 token,
7335 + u16 *data_offset)
7336 +{
7337 + struct mc_command cmd = { 0 };
7338 + struct dpni_rsp_get_tx_data_offset *rsp_params;
7339 + int err;
7340 +
7341 + /* prepare command */
7342 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
7343 + cmd_flags,
7344 + token);
7345 +
7346 + /* send command to mc*/
7347 + err = mc_send_command(mc_io, &cmd);
7348 + if (err)
7349 + return err;
7350 +
7351 + /* retrieve response parameters */
7352 + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
7353 + *data_offset = le16_to_cpu(rsp_params->data_offset);
7354 +
7355 + return 0;
7356 +}
7357 +
7358 +/**
7359 + * dpni_set_link_cfg() - set the link configuration.
7360 + * @mc_io: Pointer to MC portal's I/O object
7361 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7362 + * @token: Token of DPNI object
7363 + * @cfg: Link configuration
7364 + *
7365 + * Return: '0' on Success; Error code otherwise.
7366 + */
7367 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
7368 + u32 cmd_flags,
7369 + u16 token,
7370 + const struct dpni_link_cfg *cfg)
7371 +{
7372 + struct mc_command cmd = { 0 };
7373 + struct dpni_cmd_set_link_cfg *cmd_params;
7374 +
7375 + /* prepare command */
7376 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
7377 + cmd_flags,
7378 + token);
7379 + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
7380 + cmd_params->rate = cpu_to_le32(cfg->rate);
7381 + cmd_params->options = cpu_to_le64(cfg->options);
7382 +
7383 + /* send command to mc*/
7384 + return mc_send_command(mc_io, &cmd);
7385 +}
7386 +
7387 +/**
7388 + * dpni_get_link_state() - Return the link state (either up or down)
7389 + * @mc_io: Pointer to MC portal's I/O object
7390 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7391 + * @token: Token of DPNI object
7392 + * @state: Returned link state;
7393 + *
7394 + * Return: '0' on Success; Error code otherwise.
7395 + */
7396 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
7397 + u32 cmd_flags,
7398 + u16 token,
7399 + struct dpni_link_state *state)
7400 +{
7401 + struct mc_command cmd = { 0 };
7402 + struct dpni_rsp_get_link_state *rsp_params;
7403 + int err;
7404 +
7405 + /* prepare command */
7406 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
7407 + cmd_flags,
7408 + token);
7409 +
7410 + /* send command to mc*/
7411 + err = mc_send_command(mc_io, &cmd);
7412 + if (err)
7413 + return err;
7414 +
7415 + /* retrieve response parameters */
7416 + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
7417 + state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
7418 + state->rate = le32_to_cpu(rsp_params->rate);
7419 + state->options = le64_to_cpu(rsp_params->options);
7420 +
7421 + return 0;
7422 +}
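A sketch (not part of the patch) of polling the link after enabling the DPNI; struct dpni_link_state is declared in dpni.h, and the usual kernel printk helpers are assumed to be available:

static void example_dpni_report_link(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_link_state state = { 0 };

	if (dpni_get_link_state(mc_io, 0, token, &state))
		return;

	if (state.up)
		pr_info("dpni: link up, rate %u, options 0x%llx\n",
			state.rate, (unsigned long long)state.options);
	else
		pr_info("dpni: link down\n");
}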
7423 +
7424 +/**
7425 + * dpni_set_tx_shaping() - Set the transmit shaping
7426 + * @mc_io: Pointer to MC portal's I/O object
7427 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7428 + * @token: Token of DPNI object
7429 + * @tx_shaper: tx shaping configuration
7430 + *
7431 + * Return: '0' on Success; Error code otherwise.
7432 + */
7433 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
7434 + u32 cmd_flags,
7435 + u16 token,
7436 + const struct dpni_tx_shaping_cfg *tx_shaper)
7437 +{
7438 + struct mc_command cmd = { 0 };
7439 + struct dpni_cmd_set_tx_shaping *cmd_params;
7440 +
7441 + /* prepare command */
7442 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
7443 + cmd_flags,
7444 + token);
7445 + cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
7446 + cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size);
7447 + cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit);
7448 +
7449 + /* send command to mc*/
7450 + return mc_send_command(mc_io, &cmd);
7451 +}
7452 +
7453 +/**
7454 + * dpni_set_max_frame_length() - Set the maximum received frame length.
7455 + * @mc_io: Pointer to MC portal's I/O object
7456 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7457 + * @token: Token of DPNI object
7458 + * @max_frame_length: Maximum received frame length (in
7459 + * bytes); frame is discarded if its
7460 + * length exceeds this value
7461 + *
7462 + * Return: '0' on Success; Error code otherwise.
7463 + */
7464 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
7465 + u32 cmd_flags,
7466 + u16 token,
7467 + u16 max_frame_length)
7468 +{
7469 + struct mc_command cmd = { 0 };
7470 + struct dpni_cmd_set_max_frame_length *cmd_params;
7471 +
7472 + /* prepare command */
7473 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
7474 + cmd_flags,
7475 + token);
7476 + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
7477 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
7478 +
7479 + /* send command to mc*/
7480 + return mc_send_command(mc_io, &cmd);
7481 +}
7482 +
7483 +/**
7484 + * dpni_get_max_frame_length() - Get the maximum received frame length.
7485 + * @mc_io: Pointer to MC portal's I/O object
7486 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7487 + * @token: Token of DPNI object
7488 + * @max_frame_length: Maximum received frame length (in
7489 + * bytes); frame is discarded if its
7490 + * length exceeds this value
7491 + *
7492 + * Return: '0' on Success; Error code otherwise.
7493 + */
7494 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
7495 + u32 cmd_flags,
7496 + u16 token,
7497 + u16 *max_frame_length)
7498 +{
7499 + struct mc_command cmd = { 0 };
7500 + struct dpni_rsp_get_max_frame_length *rsp_params;
7501 + int err;
7502 +
7503 + /* prepare command */
7504 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
7505 + cmd_flags,
7506 + token);
7507 +
7508 + /* send command to mc*/
7509 + err = mc_send_command(mc_io, &cmd);
7510 + if (err)
7511 + return err;
7512 +
7513 + /* retrieve response parameters */
7514 + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
7515 + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
7516 +
7517 + return 0;
7518 +}
7519 +
7520 +/**
7521 + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
7522 + * @mc_io: Pointer to MC portal's I/O object
7523 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7524 + * @token: Token of DPNI object
7525 + * @en: Set to '1' to enable; '0' to disable
7526 + *
7527 + * Return: '0' on Success; Error code otherwise.
7528 + */
7529 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
7530 + u32 cmd_flags,
7531 + u16 token,
7532 + int en)
7533 +{
7534 + struct mc_command cmd = { 0 };
7535 + struct dpni_cmd_set_multicast_promisc *cmd_params;
7536 +
7537 + /* prepare command */
7538 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
7539 + cmd_flags,
7540 + token);
7541 + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
7542 + dpni_set_field(cmd_params->enable, ENABLE, en);
7543 +
7544 + /* send command to mc*/
7545 + return mc_send_command(mc_io, &cmd);
7546 +}
7547 +
7548 +/**
7549 + * dpni_get_multicast_promisc() - Get multicast promiscuous mode
7550 + * @mc_io: Pointer to MC portal's I/O object
7551 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7552 + * @token: Token of DPNI object
7553 + * @en: Returns '1' if enabled; '0' otherwise
7554 + *
7555 + * Return: '0' on Success; Error code otherwise.
7556 + */
7557 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
7558 + u32 cmd_flags,
7559 + u16 token,
7560 + int *en)
7561 +{
7562 + struct mc_command cmd = { 0 };
7563 + struct dpni_rsp_get_multicast_promisc *rsp_params;
7564 + int err;
7565 +
7566 + /* prepare command */
7567 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
7568 + cmd_flags,
7569 + token);
7570 +
7571 + /* send command to mc*/
7572 + err = mc_send_command(mc_io, &cmd);
7573 + if (err)
7574 + return err;
7575 +
7576 + /* retrieve response parameters */
7577 + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
7578 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
7579 +
7580 + return 0;
7581 +}
7582 +
7583 +/**
7584 + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
7585 + * @mc_io: Pointer to MC portal's I/O object
7586 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7587 + * @token: Token of DPNI object
7588 + * @en: Set to '1' to enable; '0' to disable
7589 + *
7590 + * Return: '0' on Success; Error code otherwise.
7591 + */
7592 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
7593 + u32 cmd_flags,
7594 + u16 token,
7595 + int en)
7596 +{
7597 + struct mc_command cmd = { 0 };
7598 + struct dpni_cmd_set_unicast_promisc *cmd_params;
7599 +
7600 + /* prepare command */
7601 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
7602 + cmd_flags,
7603 + token);
7604 + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
7605 + dpni_set_field(cmd_params->enable, ENABLE, en);
7606 +
7607 + /* send command to mc*/
7608 + return mc_send_command(mc_io, &cmd);
7609 +}
7610 +
7611 +/**
7612 + * dpni_get_unicast_promisc() - Get unicast promiscuous mode
7613 + * @mc_io: Pointer to MC portal's I/O object
7614 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7615 + * @token: Token of DPNI object
7616 + * @en: Returns '1' if enabled; '0' otherwise
7617 + *
7618 + * Return: '0' on Success; Error code otherwise.
7619 + */
7620 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
7621 + u32 cmd_flags,
7622 + u16 token,
7623 + int *en)
7624 +{
7625 + struct mc_command cmd = { 0 };
7626 + struct dpni_rsp_get_unicast_promisc *rsp_params;
7627 + int err;
7628 +
7629 + /* prepare command */
7630 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
7631 + cmd_flags,
7632 + token);
7633 +
7634 + /* send command to mc*/
7635 + err = mc_send_command(mc_io, &cmd);
7636 + if (err)
7637 + return err;
7638 +
7639 + /* retrieve response parameters */
7640 + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
7641 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
7642 +
7643 + return 0;
7644 +}
7645 +
7646 +/**
7647 + * dpni_set_primary_mac_addr() - Set the primary MAC address
7648 + * @mc_io: Pointer to MC portal's I/O object
7649 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7650 + * @token: Token of DPNI object
7651 + * @mac_addr: MAC address to set as primary address
7652 + *
7653 + * Return: '0' on Success; Error code otherwise.
7654 + */
7655 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
7656 + u32 cmd_flags,
7657 + u16 token,
7658 + const u8 mac_addr[6])
7659 +{
7660 + struct mc_command cmd = { 0 };
7661 + struct dpni_cmd_set_primary_mac_addr *cmd_params;
7662 + int i;
7663 +
7664 + /* prepare command */
7665 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
7666 + cmd_flags,
7667 + token);
7668 + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
7669 + for (i = 0; i < 6; i++)
7670 + cmd_params->mac_addr[i] = mac_addr[5 - i];
7671 +
7672 + /* send command to mc*/
7673 + return mc_send_command(mc_io, &cmd);
7674 +}
7675 +
7676 +/**
7677 + * dpni_get_primary_mac_addr() - Get the primary MAC address
7678 + * @mc_io: Pointer to MC portal's I/O object
7679 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7680 + * @token: Token of DPNI object
7681 + * @mac_addr: Returned MAC address
7682 + *
7683 + * Return: '0' on Success; Error code otherwise.
7684 + */
7685 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
7686 + u32 cmd_flags,
7687 + u16 token,
7688 + u8 mac_addr[6])
7689 +{
7690 + struct mc_command cmd = { 0 };
7691 + struct dpni_rsp_get_primary_mac_addr *rsp_params;
7692 + int i, err;
7693 +
7694 + /* prepare command */
7695 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
7696 + cmd_flags,
7697 + token);
7698 +
7699 + /* send command to mc*/
7700 + err = mc_send_command(mc_io, &cmd);
7701 + if (err)
7702 + return err;
7703 +
7704 + /* retrieve response parameters */
7705 + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
7706 + for (i = 0; i < 6; i++)
7707 + mac_addr[5 - i] = rsp_params->mac_addr[i];
7708 +
7709 + return 0;
7710 +}
7711 +
7712 +/**
7713 + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
7714 + * port the DPNI is attached to
7715 + * @mc_io: Pointer to MC portal's I/O object
7716 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7717 + * @token: Token of DPNI object
7718 + * @mac_addr: MAC address of the physical port, if any, otherwise 0
7719 + *
7720 + * The primary MAC address is not cleared by this operation.
7721 + *
7722 + * Return: '0' on Success; Error code otherwise.
7723 + */
7724 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
7725 + u32 cmd_flags,
7726 + u16 token,
7727 + u8 mac_addr[6])
7728 +{
7729 + struct mc_command cmd = { 0 };
7730 + struct dpni_rsp_get_port_mac_addr *rsp_params;
7731 + int i, err;
7732 +
7733 + /* prepare command */
7734 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
7735 + cmd_flags,
7736 + token);
7737 +
7738 + /* send command to mc*/
7739 + err = mc_send_command(mc_io, &cmd);
7740 + if (err)
7741 + return err;
7742 +
7743 + /* retrieve response parameters */
7744 + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
7745 + for (i = 0; i < 6; i++)
7746 + mac_addr[5 - i] = rsp_params->mac_addr[i];
7747 +
7748 + return 0;
7749 +}
7750 +
7751 +/**
7752 + * dpni_add_mac_addr() - Add MAC address filter
7753 + * @mc_io: Pointer to MC portal's I/O object
7754 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7755 + * @token: Token of DPNI object
7756 + * @mac_addr: MAC address to add
7757 + *
7758 + * Return: '0' on Success; Error code otherwise.
7759 + */
7760 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
7761 + u32 cmd_flags,
7762 + u16 token,
7763 + const u8 mac_addr[6])
7764 +{
7765 + struct mc_command cmd = { 0 };
7766 + struct dpni_cmd_add_mac_addr *cmd_params;
7767 + int i;
7768 +
7769 + /* prepare command */
7770 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
7771 + cmd_flags,
7772 + token);
7773 + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
7774 + for (i = 0; i < 6; i++)
7775 + cmd_params->mac_addr[i] = mac_addr[5 - i];
7776 +
7777 + /* send command to mc*/
7778 + return mc_send_command(mc_io, &cmd);
7779 +}
7780 +
7781 +/**
7782 + * dpni_remove_mac_addr() - Remove MAC address filter
7783 + * @mc_io: Pointer to MC portal's I/O object
7784 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7785 + * @token: Token of DPNI object
7786 + * @mac_addr: MAC address to remove
7787 + *
7788 + * Return: '0' on Success; Error code otherwise.
7789 + */
7790 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
7791 + u32 cmd_flags,
7792 + u16 token,
7793 + const u8 mac_addr[6])
7794 +{
7795 + struct mc_command cmd = { 0 };
7796 + struct dpni_cmd_remove_mac_addr *cmd_params;
7797 + int i;
7798 +
7799 + /* prepare command */
7800 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
7801 + cmd_flags,
7802 + token);
7803 + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
7804 + for (i = 0; i < 6; i++)
7805 + cmd_params->mac_addr[i] = mac_addr[5 - i];
7806 +
7807 + /* send command to mc*/
7808 + return mc_send_command(mc_io, &cmd);
7809 +}
7810 +
7811 +/**
7812 + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
7813 + * @mc_io: Pointer to MC portal's I/O object
7814 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7815 + * @token: Token of DPNI object
7816 + * @unicast: Set to '1' to clear unicast addresses
7817 + * @multicast: Set to '1' to clear multicast addresses
7818 + *
7819 + * The primary MAC address is not cleared by this operation.
7820 + *
7821 + * Return: '0' on Success; Error code otherwise.
7822 + */
7823 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
7824 + u32 cmd_flags,
7825 + u16 token,
7826 + int unicast,
7827 + int multicast)
7828 +{
7829 + struct mc_command cmd = { 0 };
7830 + struct dpni_cmd_clear_mac_filters *cmd_params;
7831 +
7832 + /* prepare command */
7833 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
7834 + cmd_flags,
7835 + token);
7836 + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
7837 + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
7838 + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
7839 +
7840 + /* send command to mc*/
7841 + return mc_send_command(mc_io, &cmd);
7842 +}
7843 +
7844 +/**
7845 + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
7846 + * @mc_io: Pointer to MC portal's I/O object
7847 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7848 + * @token: Token of DPNI object
7849 + * @tc_id: Traffic class selection (0-7)
7850 + * @cfg: Traffic class distribution configuration
7851 + *
7852 + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
7853 + * first to prepare the key_cfg_iova parameter
7854 + *
7855 + * Return: '0' on Success; error code otherwise.
7856 + */
7857 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
7858 + u32 cmd_flags,
7859 + u16 token,
7860 + u8 tc_id,
7861 + const struct dpni_rx_tc_dist_cfg *cfg)
7862 +{
7863 + struct mc_command cmd = { 0 };
7864 + struct dpni_cmd_set_rx_tc_dist *cmd_params;
7865 +
7866 + /* prepare command */
7867 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
7868 + cmd_flags,
7869 + token);
7870 + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
7871 + cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
7872 + cmd_params->tc_id = tc_id;
7873 + dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
7874 + dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
7875 + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
7876 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
7877 +
7878 + /* send command to mc*/
7879 + return mc_send_command(mc_io, &cmd);
7880 +}
7881 +
7882 +/**
7883 + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
7884 + * (to select a flow ID)
7885 + * @mc_io: Pointer to MC portal's I/O object
7886 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7887 + * @token: Token of DPNI object
7888 + * @tc_id: Traffic class selection (0-7)
7889 + * @index:	Location in the FS table where to insert the entry.
7890 + *		Only relevant if MASKING is enabled for FS
7891 + *		classification on this DPNI; it is ignored for exact match.
7892 + * @cfg: Flow steering rule to add
7893 + * @action:	Action to be taken as a result of a classification hit
7894 + *
7895 + * Return: '0' on Success; Error code otherwise.
7896 + */
7897 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
7898 + u32 cmd_flags,
7899 + u16 token,
7900 + u8 tc_id,
7901 + u16 index,
7902 + const struct dpni_rule_cfg *cfg,
7903 + const struct dpni_fs_action_cfg *action)
7904 +{
7905 + struct dpni_cmd_add_fs_entry *cmd_params;
7906 + struct mc_command cmd = { 0 };
7907 +
7908 + /* prepare command */
7909 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
7910 + cmd_flags,
7911 + token);
7912 + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
7913 + cmd_params->tc_id = tc_id;
7914 + cmd_params->key_size = cfg->key_size;
7915 + cmd_params->index = cpu_to_le16(index);
7916 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
7917 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
7918 + cmd_params->options = cpu_to_le16(action->options);
7919 + cmd_params->flow_id = cpu_to_le16(action->flow_id);
7920 + cmd_params->flc = cpu_to_le64(action->flc);
7921 +
7922 + /* send command to mc*/
7923 + return mc_send_command(mc_io, &cmd);
7924 +}
7925 +
7926 +/**
7927 + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
7928 + * traffic class
7929 + * @mc_io: Pointer to MC portal's I/O object
7930 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7931 + * @token: Token of DPNI object
7932 + * @tc_id: Traffic class selection (0-7)
7933 + * @cfg: Flow steering rule to remove
7934 + *
7935 + * Return: '0' on Success; Error code otherwise.
7936 + */
7937 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
7938 + u32 cmd_flags,
7939 + u16 token,
7940 + u8 tc_id,
7941 + const struct dpni_rule_cfg *cfg)
7942 +{
7943 + struct dpni_cmd_remove_fs_entry *cmd_params;
7944 + struct mc_command cmd = { 0 };
7945 +
7946 + /* prepare command */
7947 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
7948 + cmd_flags,
7949 + token);
7950 + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
7951 + cmd_params->tc_id = tc_id;
7952 + cmd_params->key_size = cfg->key_size;
7953 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
7954 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
7955 +
7956 + /* send command to mc*/
7957 + return mc_send_command(mc_io, &cmd);
7958 +}
7959 +
7960 +/**
7961 + * dpni_set_congestion_notification() - Set traffic class congestion
7962 + * notification configuration
7963 + * @mc_io: Pointer to MC portal's I/O object
7964 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7965 + * @token: Token of DPNI object
7966 + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
7967 + * @tc_id: Traffic class selection (0-7)
7968 + * @cfg: congestion notification configuration
7969 + *
7970 + * Return: '0' on Success; error code otherwise.
7971 + */
7972 +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
7973 + u32 cmd_flags,
7974 + u16 token,
7975 + enum dpni_queue_type qtype,
7976 + u8 tc_id,
7977 + const struct dpni_congestion_notification_cfg *cfg)
7978 +{
7979 + struct dpni_cmd_set_congestion_notification *cmd_params;
7980 + struct mc_command cmd = { 0 };
7981 +
7982 + /* prepare command */
7983 + cmd.header = mc_encode_cmd_header(
7984 + DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
7985 + cmd_flags,
7986 + token);
7987 + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
7988 + cmd_params->qtype = qtype;
7989 + cmd_params->tc = tc_id;
7990 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
7991 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
7992 + cmd_params->dest_priority = cfg->dest_cfg.priority;
7993 + dpni_set_field(cmd_params->type_units, DEST_TYPE,
7994 + cfg->dest_cfg.dest_type);
7995 + dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
7996 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
7997 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
7998 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
7999 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
8000 +
8001 + /* send command to mc*/
8002 + return mc_send_command(mc_io, &cmd);
8003 +}
8004 +
8005 +/**
8006 + * dpni_set_queue() - Set queue parameters
8007 + * @mc_io: Pointer to MC portal's I/O object
8008 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8009 + * @token: Token of DPNI object
8010 + * @qtype: Type of queue - all queue types are supported, although
8011 + * the command is ignored for Tx
8012 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
8013 + * @index: Selects the specific queue out of the set allocated for the
8014 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
8015 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what
8016 + * configuration options are set on the queue
8017 + * @queue: Queue structure
8018 + *
8019 + * Return: '0' on Success; Error code otherwise.
8020 + */
8021 +int dpni_set_queue(struct fsl_mc_io *mc_io,
8022 + u32 cmd_flags,
8023 + u16 token,
8024 + enum dpni_queue_type qtype,
8025 + u8 tc,
8026 + u8 index,
8027 + u8 options,
8028 + const struct dpni_queue *queue)
8029 +{
8030 + struct mc_command cmd = { 0 };
8031 + struct dpni_cmd_set_queue *cmd_params;
8032 +
8033 + /* prepare command */
8034 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
8035 + cmd_flags,
8036 + token);
8037 + cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
8038 + cmd_params->qtype = qtype;
8039 + cmd_params->tc = tc;
8040 + cmd_params->index = index;
8041 + cmd_params->options = options;
8042 + cmd_params->dest_id = cpu_to_le32(queue->destination.id);
8043 + cmd_params->dest_prio = queue->destination.priority;
8044 + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
8045 + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
8046 + dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
8047 + queue->destination.hold_active);
8048 + cmd_params->flc = cpu_to_le64(queue->flc.value);
8049 + cmd_params->user_context = cpu_to_le64(queue->user_context);
8050 +
8051 + /* send command to mc */
8052 + return mc_send_command(mc_io, &cmd);
8053 +}
8054 +
8055 +/**
8056 + * dpni_get_queue() - Get queue parameters
8057 + * @mc_io: Pointer to MC portal's I/O object
8058 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8059 + * @token: Token of DPNI object
8060 + * @qtype: Type of queue - all queue types are supported
8061 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
8062 + * @index: Selects the specific queue out of the set allocated for the
8063 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
8064 + * @queue: Queue configuration structure
8065 + * @qid: Queue identification
8066 + *
8067 + * Return: '0' on Success; Error code otherwise.
8068 + */
8069 +int dpni_get_queue(struct fsl_mc_io *mc_io,
8070 + u32 cmd_flags,
8071 + u16 token,
8072 + enum dpni_queue_type qtype,
8073 + u8 tc,
8074 + u8 index,
8075 + struct dpni_queue *queue,
8076 + struct dpni_queue_id *qid)
8077 +{
8078 + struct mc_command cmd = { 0 };
8079 + struct dpni_cmd_get_queue *cmd_params;
8080 + struct dpni_rsp_get_queue *rsp_params;
8081 + int err;
8082 +
8083 + /* prepare command */
8084 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
8085 + cmd_flags,
8086 + token);
8087 + cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
8088 + cmd_params->qtype = qtype;
8089 + cmd_params->tc = tc;
8090 + cmd_params->index = index;
8091 +
8092 + /* send command to mc */
8093 + err = mc_send_command(mc_io, &cmd);
8094 + if (err)
8095 + return err;
8096 +
8097 + /* retrieve response parameters */
8098 + rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
8099 + queue->destination.id = le32_to_cpu(rsp_params->dest_id);
8100 + queue->destination.priority = rsp_params->dest_prio;
8101 + queue->destination.type = dpni_get_field(rsp_params->flags,
8102 + DEST_TYPE);
8103 + queue->flc.stash_control = dpni_get_field(rsp_params->flags,
8104 + STASH_CTRL);
8105 + queue->destination.hold_active = dpni_get_field(rsp_params->flags,
8106 + HOLD_ACTIVE);
8107 + queue->flc.value = le64_to_cpu(rsp_params->flc);
8108 + queue->user_context = le64_to_cpu(rsp_params->user_context);
8109 + qid->fqid = le32_to_cpu(rsp_params->fqid);
8110 + qid->qdbin = le16_to_cpu(rsp_params->qdbin);
8111 +
8112 + return 0;
8113 +}
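A sketch (not part of the patch) of retrieving the frame queue ID of the first Rx queue of traffic class 0, which a driver needs before seeding or draining that queue through the QBMan portal. DPNI_QUEUE_RX is assumed to be the Rx member of enum dpni_queue_type declared further down in dpni.h; struct dpni_queue and struct dpni_queue_id are likewise declared there.

static int example_dpni_rx_fqid(struct fsl_mc_io *mc_io, u16 token, u32 *fqid)
{
	struct dpni_queue queue = { 0 };
	struct dpni_queue_id qid = { 0 };
	int err;

	err = dpni_get_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
			     &queue, &qid);
	if (err)
		return err;

	*fqid = qid.fqid;

	return 0;
}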
8114 +
8115 +/**
8116 + * dpni_get_statistics() - Get DPNI statistics
8117 + * @mc_io: Pointer to MC portal's I/O object
8118 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8119 + * @token: Token of DPNI object
8120 + * @page: Selects the statistics page to retrieve, see
8121 + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
8122 + * @stat: Structure containing the statistics
8123 + *
8124 + * Return: '0' on Success; Error code otherwise.
8125 + */
8126 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
8127 + u32 cmd_flags,
8128 + u16 token,
8129 + u8 page,
8130 + union dpni_statistics *stat)
8131 +{
8132 + struct mc_command cmd = { 0 };
8133 + struct dpni_cmd_get_statistics *cmd_params;
8134 + struct dpni_rsp_get_statistics *rsp_params;
8135 + int i, err;
8136 +
8137 + /* prepare command */
8138 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
8139 + cmd_flags,
8140 + token);
8141 + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
8142 + cmd_params->page_number = page;
8143 +
8144 + /* send command to mc */
8145 + err = mc_send_command(mc_io, &cmd);
8146 + if (err)
8147 + return err;
8148 +
8149 + /* retrieve response parameters */
8150 + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
8151 + for (i = 0; i < DPNI_STATISTICS_CNT; i++)
8152 + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
8153 +
8154 + return 0;
8155 +}
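A sketch (not part of the patch) of dumping one page of raw counters; DPNI_STATISTICS_CNT and union dpni_statistics (with its raw.counter[] view) are assumed to be declared in dpni.h, as used above:

static int example_dpni_dump_stats(struct fsl_mc_io *mc_io, u16 token, u8 page)
{
	union dpni_statistics stats;
	int i, err;

	err = dpni_get_statistics(mc_io, 0, token, page, &stats);
	if (err)
		return err;

	for (i = 0; i < DPNI_STATISTICS_CNT; i++)
		pr_info("dpni: page %u counter %d = %llu\n",
			page, i, (unsigned long long)stats.raw.counter[i]);

	return 0;
}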
8156 +
8157 +/**
8158 + * dpni_reset_statistics() - Clears DPNI statistics
8159 + * @mc_io: Pointer to MC portal's I/O object
8160 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8161 + * @token: Token of DPNI object
8162 + *
8163 + * Return: '0' on Success; Error code otherwise.
8164 + */
8165 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8166 + u32 cmd_flags,
8167 + u16 token)
8168 +{
8169 + struct mc_command cmd = { 0 };
8170 +
8171 + /* prepare command */
8172 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
8173 + cmd_flags,
8174 + token);
8175 +
8176 + /* send command to mc*/
8177 + return mc_send_command(mc_io, &cmd);
8178 +}
8179 +
8180 +/**
8181 + * dpni_set_taildrop() - Set taildrop per queue or TC
8182 + * @mc_io: Pointer to MC portal's I/O object
8183 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8184 + * @token: Token of DPNI object
8185 + * @cg_point: Congestion point
8186 + * @qtype:	Queue type on which the taildrop is configured.
8187 + * Only Rx queues are supported for now
8188 + * @tc: Traffic class to apply this taildrop to
8189 + * @index:	Index of the queue if the DPNI supports multiple queues for
8190 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8191 + * @taildrop: Taildrop structure
8192 + *
8193 + * Return: '0' on Success; Error code otherwise.
8194 + */
8195 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
8196 + u32 cmd_flags,
8197 + u16 token,
8198 + enum dpni_congestion_point cg_point,
8199 + enum dpni_queue_type qtype,
8200 + u8 tc,
8201 + u8 index,
8202 + struct dpni_taildrop *taildrop)
8203 +{
8204 + struct mc_command cmd = { 0 };
8205 + struct dpni_cmd_set_taildrop *cmd_params;
8206 +
8207 + /* prepare command */
8208 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
8209 + cmd_flags,
8210 + token);
8211 + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
8212 + cmd_params->congestion_point = cg_point;
8213 + cmd_params->qtype = qtype;
8214 + cmd_params->tc = tc;
8215 + cmd_params->index = index;
8216 + dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
8217 + cmd_params->units = taildrop->units;
8218 + cmd_params->threshold = cpu_to_le32(taildrop->threshold);
8219 +
8220 + /* send command to mc */
8221 + return mc_send_command(mc_io, &cmd);
8222 +}
8223 +
8224 +/**
8225 + * dpni_get_taildrop() - Get taildrop information
8226 + * @mc_io: Pointer to MC portal's I/O object
8227 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8228 + * @token: Token of DPNI object
8229 + * @cg_point: Congestion point
8230 + * @qtype:	Queue type on which the taildrop is configured.
8231 + * Only Rx queues are supported for now
8232 + * @tc: Traffic class to apply this taildrop to
8233 + * @index:	Index of the queue if the DPNI supports multiple queues for
8234 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8235 + * @taildrop: Taildrop structure
8236 + *
8237 + * Return: '0' on Success; Error code otherwise.
8238 + */
8239 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
8240 + u32 cmd_flags,
8241 + u16 token,
8242 + enum dpni_congestion_point cg_point,
8243 + enum dpni_queue_type qtype,
8244 + u8 tc,
8245 + u8 index,
8246 + struct dpni_taildrop *taildrop)
8247 +{
8248 + struct mc_command cmd = { 0 };
8249 + struct dpni_cmd_get_taildrop *cmd_params;
8250 + struct dpni_rsp_get_taildrop *rsp_params;
8251 + int err;
8252 +
8253 + /* prepare command */
8254 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
8255 + cmd_flags,
8256 + token);
8257 + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
8258 + cmd_params->congestion_point = cg_point;
8259 + cmd_params->qtype = qtype;
8260 + cmd_params->tc = tc;
8261 + cmd_params->index = index;
8262 +
8263 + /* send command to mc */
8264 + err = mc_send_command(mc_io, &cmd);
8265 + if (err)
8266 + return err;
8267 +
8268 + /* retrieve response parameters */
8269 + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
8270 + taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
8271 + taildrop->units = rsp_params->units;
8272 + taildrop->threshold = le32_to_cpu(rsp_params->threshold);
8273 +
8274 + return 0;
8275 +}
8276 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8277 new file mode 100644
8278 index 00000000..600c3574
8279 --- /dev/null
8280 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8281 @@ -0,0 +1,989 @@
8282 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
8283 + * Copyright 2016 NXP
8284 + *
8285 + * Redistribution and use in source and binary forms, with or without
8286 + * modification, are permitted provided that the following conditions are met:
8287 + * * Redistributions of source code must retain the above copyright
8288 + * notice, this list of conditions and the following disclaimer.
8289 + * * Redistributions in binary form must reproduce the above copyright
8290 + * notice, this list of conditions and the following disclaimer in the
8291 + * documentation and/or other materials provided with the distribution.
8292 + * * Neither the name of the above-listed copyright holders nor the
8293 + * names of any contributors may be used to endorse or promote products
8294 + * derived from this software without specific prior written permission.
8295 + *
8296 + *
8297 + * ALTERNATIVELY, this software may be distributed under the terms of the
8298 + * GNU General Public License ("GPL") as published by the Free Software
8299 + * Foundation, either version 2 of that License or (at your option) any
8300 + * later version.
8301 + *
8302 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
8303 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8304 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8305 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
8306 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
8307 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
8308 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
8309 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
8310 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
8311 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
8312 + * POSSIBILITY OF SUCH DAMAGE.
8313 + */
8314 +#ifndef __FSL_DPNI_H
8315 +#define __FSL_DPNI_H
8316 +
8317 +#include "dpkg.h"
8318 +
8319 +struct fsl_mc_io;
8320 +
8321 +/**
8322 + * Data Path Network Interface API
8323 + * Contains initialization APIs and runtime control APIs for DPNI
8324 + */
8325 +
8326 +/** General DPNI macros */
8327 +
8328 +/**
8329 + * Maximum number of traffic classes
8330 + */
8331 +#define DPNI_MAX_TC 8
8332 +/**
8333 + * Maximum number of buffer pools per DPNI
8334 + */
8335 +#define DPNI_MAX_DPBP 8
8336 +
8337 +/**
8338 + * All traffic classes considered; see dpni_set_queue()
8339 + */
8340 +#define DPNI_ALL_TCS (u8)(-1)
8341 +/**
8342 + * All flows within traffic class considered; see dpni_set_queue()
8343 + */
8344 +#define DPNI_ALL_TC_FLOWS (u16)(-1)
8345 +/**
8346 + * Generate new flow ID; see dpni_set_queue()
8347 + */
8348 +#define DPNI_NEW_FLOW_ID (u16)(-1)
8349 +
8350 +/**
8351 + * Tx traffic is always released to a buffer pool on transmit; there are no
8352 + * resources allocated to have the frames confirmed back to the source after
8353 + * transmission.
8354 + */
8355 +#define DPNI_OPT_TX_FRM_RELEASE 0x000001
8356 +/**
8357 + * Disables support for MAC address filtering for addresses other than primary
8358 + * MAC address. This affects both unicast and multicast. Promiscuous mode can
8359 + * still be enabled/disabled for both unicast and multicast. If promiscuous mode
8360 + * is disabled, only traffic matching the primary MAC address will be accepted.
8361 + */
8362 +#define DPNI_OPT_NO_MAC_FILTER 0x000002
8363 +/**
8364 + * Allocate policers for this DPNI. They can be used to rate-limit traffic on
8365 + * a per traffic class (TC) basis.
8366 + */
8367 +#define DPNI_OPT_HAS_POLICING 0x000004
8368 +/**
8369 + * Congestion can be managed in several ways, allowing the buffer pool to
8370 + * deplete on ingress, taildrop on each queue or use congestion groups for sets
8371 + * of queues. If set, it configures a single congestion group across all TCs.
8372 + * If reset, a congestion group is allocated for each TC. Only relevant if the
8373 + * DPNI has multiple traffic classes.
8374 + */
8375 +#define DPNI_OPT_SHARED_CONGESTION 0x000008
8376 +/**
8377 + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
8378 + * look-ups are exact match. Note that TCAM is not available on LS1088 and its
8379 + * variants. Setting this bit on these SoCs will trigger an error.
8380 + */
8381 +#define DPNI_OPT_HAS_KEY_MASKING 0x000010
8382 +/**
8383 + * Disables the flow steering table.
8384 + */
8385 +#define DPNI_OPT_NO_FS 0x000020
8386 +
8387 +int dpni_open(struct fsl_mc_io *mc_io,
8388 + u32 cmd_flags,
8389 + int dpni_id,
8390 + u16 *token);
8391 +
8392 +int dpni_close(struct fsl_mc_io *mc_io,
8393 + u32 cmd_flags,
8394 + u16 token);
8395 +
8396 +/**
8397 + * struct dpni_pools_cfg - Structure representing buffer pools configuration
8398 + * @num_dpbp: Number of DPBPs
8399 + * @pools: Array of buffer pool parameters; the number of valid entries
8400 + * must match 'num_dpbp' value
8401 + */
8402 +struct dpni_pools_cfg {
8403 + u8 num_dpbp;
8404 + /**
8405 + * struct pools - Buffer pools parameters
8406 + * @dpbp_id: DPBP object ID
8407 + * @buffer_size: Buffer size
8408 + * @backup_pool: Backup pool
8409 + */
8410 + struct {
8411 + int dpbp_id;
8412 + u16 buffer_size;
8413 + int backup_pool;
8414 + } pools[DPNI_MAX_DPBP];
8415 +};
8416 +
8417 +int dpni_set_pools(struct fsl_mc_io *mc_io,
8418 + u32 cmd_flags,
8419 + u16 token,
8420 + const struct dpni_pools_cfg *cfg);
8421 +
8422 +int dpni_enable(struct fsl_mc_io *mc_io,
8423 + u32 cmd_flags,
8424 + u16 token);
8425 +
8426 +int dpni_disable(struct fsl_mc_io *mc_io,
8427 + u32 cmd_flags,
8428 + u16 token);
8429 +
8430 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
8431 + u32 cmd_flags,
8432 + u16 token,
8433 + int *en);
8434 +
8435 +int dpni_reset(struct fsl_mc_io *mc_io,
8436 + u32 cmd_flags,
8437 + u16 token);
8438 +
8439 +/**
8440 + * DPNI IRQ Index and Events
8441 + */
8442 +
8443 +/**
8444 + * IRQ index
8445 + */
8446 +#define DPNI_IRQ_INDEX 0
8447 +/**
8448 + * IRQ event - indicates a change in link state
8449 + */
8450 +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
8451 +
8452 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
8453 + u32 cmd_flags,
8454 + u16 token,
8455 + u8 irq_index,
8456 + u8 en);
8457 +
8458 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
8459 + u32 cmd_flags,
8460 + u16 token,
8461 + u8 irq_index,
8462 + u8 *en);
8463 +
8464 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
8465 + u32 cmd_flags,
8466 + u16 token,
8467 + u8 irq_index,
8468 + u32 mask);
8469 +
8470 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
8471 + u32 cmd_flags,
8472 + u16 token,
8473 + u8 irq_index,
8474 + u32 *mask);
8475 +
8476 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
8477 + u32 cmd_flags,
8478 + u16 token,
8479 + u8 irq_index,
8480 + u32 *status);
8481 +
8482 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
8483 + u32 cmd_flags,
8484 + u16 token,
8485 + u8 irq_index,
8486 + u32 status);
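
Editorial note (not part of the patch): a hedged sketch of arming the link-change interrupt declared above. Only DPNI_IRQ_EVENT_LINK_CHANGED is unmasked on DPNI_IRQ_INDEX before the IRQ line is enabled; mc_io/token assumptions are as in the previous sketch and the function name is hypothetical.

/* Sketch: unmask and enable the link-state-change interrupt. */
static int example_enable_link_irq(struct fsl_mc_io *mc_io, u16 token)
{
	int err;

	err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
				DPNI_IRQ_EVENT_LINK_CHANGED);
	if (err)
		return err;

	return dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
}
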
8487 +
8488 +/**
8489 + * struct dpni_attr - Structure representing DPNI attributes
8490 + * @options: Any combination of the following options:
8491 + * DPNI_OPT_TX_FRM_RELEASE
8492 + * DPNI_OPT_NO_MAC_FILTER
8493 + * DPNI_OPT_HAS_POLICING
8494 + * DPNI_OPT_SHARED_CONGESTION
8495 + * DPNI_OPT_HAS_KEY_MASKING
8496 + * DPNI_OPT_NO_FS
8497 + * @num_queues: Number of Tx and Rx queues used for traffic distribution.
8498 + * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
8499 + * @mac_filter_entries: Number of entries in the MAC address filtering table.
8500 + * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
8501 + * @qos_entries: Number of entries in the QoS classification table.
8502 + * @fs_entries: Number of entries in the flow steering table.
8503 + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
8504 + * than this when adding QoS entries will result in an error.
8505 + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
8506 + * key larger than this when composing the hash + FS key will
8507 + * result in an error.
8508 + * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
8509 + *		    in 6, 5 and 5 bits, respectively.
8510 + */
8511 +struct dpni_attr {
8512 + u32 options;
8513 + u8 num_queues;
8514 + u8 num_tcs;
8515 + u8 mac_filter_entries;
8516 + u8 vlan_filter_entries;
8517 + u8 qos_entries;
8518 + u16 fs_entries;
8519 + u8 qos_key_size;
8520 + u8 fs_key_size;
8521 + u16 wriop_version;
8522 +};
8523 +
8524 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
8525 + u32 cmd_flags,
8526 + u16 token,
8527 + struct dpni_attr *attr);
8528 +
8529 +/**
8530 + * DPNI errors
8531 + */
8532 +
8533 +/**
8534 + * Extract out of frame header error
8535 + */
8536 +#define DPNI_ERROR_EOFHE 0x00020000
8537 +/**
8538 + * Frame length error
8539 + */
8540 +#define DPNI_ERROR_FLE 0x00002000
8541 +/**
8542 + * Frame physical error
8543 + */
8544 +#define DPNI_ERROR_FPE 0x00001000
8545 +/**
8546 + * Parsing header error
8547 + */
8548 +#define DPNI_ERROR_PHE 0x00000020
8549 +/**
8550 + * Parser L3 checksum error
8551 + */
8552 +#define DPNI_ERROR_L3CE 0x00000004
8553 +/**
8554 + * Parser L4 checksum error
8555 + */
8556 +#define DPNI_ERROR_L4CE 0x00000001
8557 +
8558 +/**
8559 + * enum dpni_error_action - Defines DPNI behavior for errors
8560 + * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
8561 + * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
8562 + * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
8563 + */
8564 +enum dpni_error_action {
8565 + DPNI_ERROR_ACTION_DISCARD = 0,
8566 + DPNI_ERROR_ACTION_CONTINUE = 1,
8567 + DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
8568 +};
8569 +
8570 +/**
8571 + * struct dpni_error_cfg - Structure representing DPNI errors treatment
8572 + * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
8573 + * @error_action: The desired action for the errors mask
8574 + * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
8575 + * status (FAS); relevant only for the non-discard action
8576 + */
8577 +struct dpni_error_cfg {
8578 + u32 errors;
8579 + enum dpni_error_action error_action;
8580 + int set_frame_annotation;
8581 +};
8582 +
8583 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
8584 + u32 cmd_flags,
8585 + u16 token,
8586 + struct dpni_error_cfg *cfg);
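
Editorial note (not part of the patch): a small sketch of the error-treatment API above, discarding frames that fail L3/L4 checksum validation without marking the FAS field; the helper name is hypothetical and the mc_io/token assumptions are unchanged.

/* Sketch: drop frames with L3/L4 checksum errors, no FAS annotation. */
static int example_set_error_policy(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_error_cfg err_cfg = {
		.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
		.error_action = DPNI_ERROR_ACTION_DISCARD,
		.set_frame_annotation = 0,
	};

	return dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
}
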
8587 +
8588 +/**
8589 + * DPNI buffer layout modification options
8590 + */
8591 +
8592 +/**
8593 + * Select to modify the time-stamp setting
8594 + */
8595 +#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
8596 +/**
8597 + * Select to modify the parser-result setting; not applicable for Tx
8598 + */
8599 +#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
8600 +/**
8601 + * Select to modify the frame-status setting
8602 + */
8603 +#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
8604 +/**
8605 + * Select to modify the private-data-size setting
8606 + */
8607 +#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
8608 +/**
8609 + * Select to modify the data-alignment setting
8610 + */
8611 +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
8612 +/**
8613 + * Select to modify the data-head-room setting
8614 + */
8615 +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
8616 +/**
8617 + * Select to modify the data-tail-room setting
8618 + */
8619 +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
8620 +
8621 +/**
8622 + * struct dpni_buffer_layout - Structure representing DPNI buffer layout
8623 + * @options: Flags representing the suggested modifications to the buffer
8624 + * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
8625 + * @pass_timestamp: Pass timestamp value
8626 + * @pass_parser_result: Pass parser results
8627 + * @pass_frame_status: Pass frame status
8628 + * @private_data_size: Size kept for private data (in bytes)
8629 + * @data_align: Data alignment
8630 + * @data_head_room: Data head room
8631 + * @data_tail_room: Data tail room
8632 + */
8633 +struct dpni_buffer_layout {
8634 + u32 options;
8635 + int pass_timestamp;
8636 + int pass_parser_result;
8637 + int pass_frame_status;
8638 + u16 private_data_size;
8639 + u16 data_align;
8640 + u16 data_head_room;
8641 + u16 data_tail_room;
8642 +};
8643 +
8644 +/**
8645 + * enum dpni_queue_type - Identifies a type of queue targeted by the command
8646 + * @DPNI_QUEUE_RX: Rx queue
8647 + * @DPNI_QUEUE_TX: Tx queue
8648 + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
8649 + * @DPNI_QUEUE_RX_ERR: Rx error queue
8650 + */
8650 +enum dpni_queue_type {
8651 + DPNI_QUEUE_RX,
8652 + DPNI_QUEUE_TX,
8653 + DPNI_QUEUE_TX_CONFIRM,
8654 + DPNI_QUEUE_RX_ERR,
8655 +};
8656 +
8657 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
8658 + u32 cmd_flags,
8659 + u16 token,
8660 + enum dpni_queue_type qtype,
8661 + struct dpni_buffer_layout *layout);
8662 +
8663 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
8664 + u32 cmd_flags,
8665 + u16 token,
8666 + enum dpni_queue_type qtype,
8667 + const struct dpni_buffer_layout *layout);
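
Editorial note (not part of the patch): a sketch of programming the Rx buffer layout declared above, requesting frame status plus extra headroom. The 64-byte headroom is an illustrative value, the function name is hypothetical, and the usual mc_io/token assumptions apply.

/* Sketch: request frame status and 64 bytes of headroom on Rx buffers. */
static int example_set_rx_layout(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_buffer_layout layout = { 0 };

	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
	layout.pass_frame_status = 1;
	layout.data_head_room = 64; /* illustrative value, in bytes */

	return dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
}
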
8668 +
8669 +/**
8670 + * enum dpni_offload - Identifies a type of offload targeted by the command
8671 + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
8672 + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
8673 + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
8674 + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
8675 + */
8676 +enum dpni_offload {
8677 + DPNI_OFF_RX_L3_CSUM,
8678 + DPNI_OFF_RX_L4_CSUM,
8679 + DPNI_OFF_TX_L3_CSUM,
8680 + DPNI_OFF_TX_L4_CSUM,
8681 +};
8682 +
8683 +int dpni_set_offload(struct fsl_mc_io *mc_io,
8684 + u32 cmd_flags,
8685 + u16 token,
8686 + enum dpni_offload type,
8687 + u32 config);
8688 +
8689 +int dpni_get_offload(struct fsl_mc_io *mc_io,
8690 + u32 cmd_flags,
8691 + u16 token,
8692 + enum dpni_offload type,
8693 + u32 *config);
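
Editorial note (not part of the patch): a sketch enabling all four checksum offloads through dpni_set_offload(); the helper name is hypothetical and the mc_io/token assumptions are as before.

/* Sketch: enable Rx/Tx L3 and L4 checksum offload. */
static int example_enable_csum(struct fsl_mc_io *mc_io, u16 token)
{
	int err;

	err = dpni_set_offload(mc_io, 0, token, DPNI_OFF_RX_L3_CSUM, 1);
	if (!err)
		err = dpni_set_offload(mc_io, 0, token, DPNI_OFF_RX_L4_CSUM, 1);
	if (!err)
		err = dpni_set_offload(mc_io, 0, token, DPNI_OFF_TX_L3_CSUM, 1);
	if (!err)
		err = dpni_set_offload(mc_io, 0, token, DPNI_OFF_TX_L4_CSUM, 1);

	return err;
}
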
8694 +
8695 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
8696 + u32 cmd_flags,
8697 + u16 token,
8698 + enum dpni_queue_type qtype,
8699 + u16 *qdid);
8700 +
8701 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
8702 + u32 cmd_flags,
8703 + u16 token,
8704 + u16 *data_offset);
8705 +
8706 +#define DPNI_STATISTICS_CNT 7
8707 +
8708 +union dpni_statistics {
8709 + /**
8710 + * struct page_0 - Page_0 statistics structure
8711 + * @ingress_all_frames: Ingress frame count
8712 + * @ingress_all_bytes: Ingress byte count
8713 + * @ingress_multicast_frames: Ingress multicast frame count
8714 + * @ingress_multicast_bytes: Ingress multicast byte count
8715 + * @ingress_broadcast_frames: Ingress broadcast frame count
8716 + * @ingress_broadcast_bytes: Ingress broadcast byte count
8717 + */
8718 + struct {
8719 + u64 ingress_all_frames;
8720 + u64 ingress_all_bytes;
8721 + u64 ingress_multicast_frames;
8722 + u64 ingress_multicast_bytes;
8723 + u64 ingress_broadcast_frames;
8724 + u64 ingress_broadcast_bytes;
8725 + } page_0;
8726 + /**
8727 + * struct page_1 - Page_1 statistics structure
8728 + * @egress_all_frames: Egress frame count
8729 + * @egress_all_bytes: Egress byte count
8730 + * @egress_multicast_frames: Egress multicast frame count
8731 + * @egress_multicast_bytes: Egress multicast byte count
8732 + * @egress_broadcast_frames: Egress broadcast frame count
8733 + * @egress_broadcast_bytes: Egress broadcast byte count
8734 + */
8735 + struct {
8736 + u64 egress_all_frames;
8737 + u64 egress_all_bytes;
8738 + u64 egress_multicast_frames;
8739 + u64 egress_multicast_bytes;
8740 + u64 egress_broadcast_frames;
8741 + u64 egress_broadcast_bytes;
8742 + } page_1;
8743 + /**
8744 + * struct page_2 - Page_2 statistics structure
8745 + * @ingress_filtered_frames: Ingress filtered frame count
8746 + * @ingress_discarded_frames: Ingress discarded frame count
8747 + * @ingress_nobuffer_discards: Ingress discarded frame count
8748 + * due to lack of buffers
8749 + * @egress_discarded_frames: Egress discarded frame count
8750 + * @egress_confirmed_frames: Egress confirmed frame count
8751 + */
8752 + struct {
8753 + u64 ingress_filtered_frames;
8754 + u64 ingress_discarded_frames;
8755 + u64 ingress_nobuffer_discards;
8756 + u64 egress_discarded_frames;
8757 + u64 egress_confirmed_frames;
8758 + } page_2;
8759 + /**
8760 + * struct raw - raw statistics structure
8761 + */
8762 + struct {
8763 + u64 counter[DPNI_STATISTICS_CNT];
8764 + } raw;
8765 +};
8766 +
8767 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
8768 + u32 cmd_flags,
8769 + u16 token,
8770 + u8 page,
8771 + union dpni_statistics *stat);
8772 +
8773 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8774 + u32 cmd_flags,
8775 + u16 token);
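
Editorial note (not part of the patch): a sketch of reading one statistics page through the raw view of the union above; the caller supplies an array of DPNI_STATISTICS_CNT u64 counters and interprets them per the page layouts. The helper name is hypothetical; mc_io/token assumptions as before.

/* Sketch: copy one statistics page into a caller-provided counter array. */
static int example_read_stats_page(struct fsl_mc_io *mc_io, u16 token,
				   u8 page, u64 *counters)
{
	union dpni_statistics stats;
	int i, err;

	err = dpni_get_statistics(mc_io, 0, token, page, &stats);
	if (err)
		return err;

	for (i = 0; i < DPNI_STATISTICS_CNT; i++)
		counters[i] = stats.raw.counter[i];

	return 0;
}
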
8776 +
8777 +/**
8778 + * Enable auto-negotiation
8779 + */
8780 +#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
8781 +/**
8782 + * Enable half-duplex mode
8783 + */
8784 +#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
8785 +/**
8786 + * Enable pause frames
8787 + */
8788 +#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
8789 +/**
8790 + * Enable asymmetric pause frames
8791 + */
8792 +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
8793 +
8794 +/**
8795 + * struct dpni_link_cfg - Structure representing DPNI link configuration
8796 + * @rate: Rate
8797 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
8798 + */
8799 +struct dpni_link_cfg {
8800 + u32 rate;
8801 + u64 options;
8802 +};
8803 +
8804 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
8805 + u32 cmd_flags,
8806 + u16 token,
8807 + const struct dpni_link_cfg *cfg);
8808 +
8809 +/**
8810 + * struct dpni_link_state - Structure representing DPNI link state
8811 + * @rate: Rate
8812 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
8813 + * @up: Link state; '0' for down, '1' for up
8814 + */
8815 +struct dpni_link_state {
8816 + u32 rate;
8817 + u64 options;
8818 + int up;
8819 +};
8820 +
8821 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
8822 + u32 cmd_flags,
8823 + u16 token,
8824 + struct dpni_link_state *state);
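
Editorial note (not part of the patch): a sketch combining the two link calls above, requesting autonegotiation with pause and then reporting whether the link came up. The 1000 Mbps rate is illustrative, the helper name and the *up out-parameter are hypothetical, and the mc_io/token assumptions are unchanged.

/* Sketch: configure the link, then read back its state. */
static int example_configure_link(struct fsl_mc_io *mc_io, u16 token, int *up)
{
	struct dpni_link_cfg cfg = {
		.rate = 1000, /* Mbps, illustrative */
		.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE,
	};
	struct dpni_link_state state;
	int err;

	err = dpni_set_link_cfg(mc_io, 0, token, &cfg);
	if (err)
		return err;

	err = dpni_get_link_state(mc_io, 0, token, &state);
	if (!err)
		*up = state.up;
	return err;
}
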
8825 +
8826 +/**
8827 + * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration
8828 + * @rate_limit: rate in Mbps
8829 + * @max_burst_size: burst size in bytes (up to 64KB)
8830 + */
8831 +struct dpni_tx_shaping_cfg {
8832 + u32 rate_limit;
8833 + u16 max_burst_size;
8834 +};
8835 +
8836 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
8837 + u32 cmd_flags,
8838 + u16 token,
8839 + const struct dpni_tx_shaping_cfg *tx_shaper);
8840 +
8841 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
8842 + u32 cmd_flags,
8843 + u16 token,
8844 + u16 max_frame_length);
8845 +
8846 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
8847 + u32 cmd_flags,
8848 + u16 token,
8849 + u16 *max_frame_length);
8850 +
8851 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
8852 + u32 cmd_flags,
8853 + u16 token,
8854 + int en);
8855 +
8856 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
8857 + u32 cmd_flags,
8858 + u16 token,
8859 + int *en);
8860 +
8861 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
8862 + u32 cmd_flags,
8863 + u16 token,
8864 + int en);
8865 +
8866 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
8867 + u32 cmd_flags,
8868 + u16 token,
8869 + int *en);
8870 +
8871 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
8872 + u32 cmd_flags,
8873 + u16 token,
8874 + const u8 mac_addr[6]);
8875 +
8876 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
8877 + u32 cmd_flags,
8878 + u16 token,
8879 + u8 mac_addr[6]);
8880 +
8881 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
8882 + u32 cm_flags,
8883 + u16 token,
8884 + u8 mac_addr[6]);
8885 +
8886 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
8887 + u32 cmd_flags,
8888 + u16 token,
8889 + const u8 mac_addr[6]);
8890 +
8891 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
8892 + u32 cmd_flags,
8893 + u16 token,
8894 + const u8 mac_addr[6]);
8895 +
8896 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
8897 + u32 cmd_flags,
8898 + u16 token,
8899 + int unicast,
8900 + int multicast);
8901 +
8902 +/**
8903 + * enum dpni_dist_mode - DPNI distribution mode
8904 + * @DPNI_DIST_MODE_NONE: No distribution
8905 + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
8906 + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
8907 + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
8908 + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
8909 + */
8910 +enum dpni_dist_mode {
8911 + DPNI_DIST_MODE_NONE = 0,
8912 + DPNI_DIST_MODE_HASH = 1,
8913 + DPNI_DIST_MODE_FS = 2
8914 +};
8915 +
8916 +/**
8917 + * enum dpni_fs_miss_action - DPNI Flow Steering miss action
8918 + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
8919 + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
8920 + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
8921 + */
8922 +enum dpni_fs_miss_action {
8923 + DPNI_FS_MISS_DROP = 0,
8924 + DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
8925 + DPNI_FS_MISS_HASH = 2
8926 +};
8927 +
8928 +/**
8929 + * struct dpni_fs_tbl_cfg - Flow Steering table configuration
8930 + * @miss_action: Miss action selection
8931 + * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
8932 + */
8933 +struct dpni_fs_tbl_cfg {
8934 + enum dpni_fs_miss_action miss_action;
8935 + u16 default_flow_id;
8936 +};
8937 +
8938 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
8939 + u8 *key_cfg_buf);
8940 +
8941 +/**
8942 + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
8943 + * @dist_size: Set the distribution size;
8944 + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
8945 + * 112,128,192,224,256,384,448,512,768,896,1024
8946 + * @dist_mode: Distribution mode
8947 + * @key_cfg_iova: I/O virtual address of 256 bytes of DMA-able memory filled
8948 + *		with the extractions to be used for the distribution key, by
8949 + *		calling dpni_prepare_key_cfg(); relevant only when
8950 + *		'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
8951 + * @fs_cfg: Flow Steering table configuration; only relevant if
8952 + * 'dist_mode = DPNI_DIST_MODE_FS'
8953 + */
8954 +struct dpni_rx_tc_dist_cfg {
8955 + u16 dist_size;
8956 + enum dpni_dist_mode dist_mode;
8957 + u64 key_cfg_iova;
8958 + struct dpni_fs_tbl_cfg fs_cfg;
8959 +};
8960 +
8961 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
8962 + u32 cmd_flags,
8963 + u16 token,
8964 + u8 tc_id,
8965 + const struct dpni_rx_tc_dist_cfg *cfg);
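
Editorial note (not part of the patch): a sketch of enabling hash distribution on traffic class 0 across 8 queues. It assumes key_iova already points at a 256-byte DMA-able buffer that was filled by dpni_prepare_key_cfg() from a struct dpkg_profile_cfg (building that profile is not shown); the helper name and the distribution size are illustrative.

/* Sketch: hash-distribute TC 0 traffic over 8 Rx queues. */
static int example_set_hash_dist(struct fsl_mc_io *mc_io, u16 token,
				 u64 key_iova)
{
	struct dpni_rx_tc_dist_cfg dist_cfg = { 0 };

	dist_cfg.dist_size = 8;
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
	dist_cfg.key_cfg_iova = key_iova; /* pre-built key composition */

	return dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist_cfg);
}
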
8966 +
8967 +/**
8968 + * enum dpni_dest - DPNI destination types
8969 + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
8970 + * does not generate FQDAN notifications; user is expected to
8971 + * dequeue from the queue based on polling or other user-defined
8972 + * method
8973 + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
8974 + * notifications to the specified DPIO; user is expected to dequeue
8975 + * from the queue only after notification is received
8976 + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
8977 + * FQDAN notifications, but is connected to the specified DPCON
8978 + * object; user is expected to dequeue from the DPCON channel
8979 + */
8980 +enum dpni_dest {
8981 + DPNI_DEST_NONE = 0,
8982 + DPNI_DEST_DPIO = 1,
8983 + DPNI_DEST_DPCON = 2
8984 +};
8985 +
8986 +/**
8987 + * struct dpni_queue - Queue structure
8988 + * @user_context: User data, presented to the user along with any frames from
8989 + * this queue. Not relevant for Tx queues.
8990 + */
8991 +struct dpni_queue {
8992 +/**
8993 + * struct destination - Destination structure
8994 + * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
8995 + * Identifies either a DPIO or a DPCON object. Not relevant for
8996 + * Tx queues.
8997 + * @type: May be one of the following:
8998 + * 0 - No destination, queue can be manually queried, but will not
8999 + * push traffic or notifications to a DPIO;
9000 + * 1 - The destination is a DPIO. When traffic becomes available in
9001 + * the queue a FQDAN (FQ data available notification) will be
9002 + * generated to selected DPIO;
9003 + * 2 - The destination is a DPCON. The queue is associated with a
9004 + * DPCON object for the purpose of scheduling between multiple
9005 + * queues. The DPCON may be independently configured to
9006 + * generate notifications. Not relevant for Tx queues.
9007 + * @hold_active: Hold active, maintains a queue scheduled for longer
9008 + * in a DPIO during dequeue to reduce spread of traffic.
9009 + * Only relevant if queues are not affined to a single DPIO.
9010 + */
9011 + struct {
9012 + u16 id;
9013 + enum dpni_dest type;
9014 + char hold_active;
9015 + u8 priority;
9016 + } destination;
9017 + u64 user_context;
9018 + struct {
9019 + u64 value;
9020 + char stash_control;
9021 + } flc;
9022 +};
9023 +
9024 +/**
9025 + * struct dpni_queue_id - Queue identification, used for enqueue commands
9026 + * or queue control
9027 + * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
9028 + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
9029 + * for Tx queues.
9030 + */
9031 +struct dpni_queue_id {
9032 + u32 fqid;
9033 + u16 qdbin;
9034 +};
9035 +
9036 +/**
9037 + * Set User Context
9038 + */
9039 +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
9040 +#define DPNI_QUEUE_OPT_DEST 0x00000002
9041 +#define DPNI_QUEUE_OPT_FLC 0x00000004
9042 +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
9043 +
9044 +int dpni_set_queue(struct fsl_mc_io *mc_io,
9045 + u32 cmd_flags,
9046 + u16 token,
9047 + enum dpni_queue_type qtype,
9048 + u8 tc,
9049 + u8 index,
9050 + u8 options,
9051 + const struct dpni_queue *queue);
9052 +
9053 +int dpni_get_queue(struct fsl_mc_io *mc_io,
9054 + u32 cmd_flags,
9055 + u16 token,
9056 + enum dpni_queue_type qtype,
9057 + u8 tc,
9058 + u8 index,
9059 + struct dpni_queue *queue,
9060 + struct dpni_queue_id *qid);
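
Editorial note (not part of the patch): a sketch of pointing Rx queue 0 of TC 0 at a DPIO channel and tagging it with a driver cookie, using the DPNI_QUEUE_OPT_* flags above to select which fields are applied. dpio_id and ctx are hypothetical caller-owned values; the usual mc_io/token assumptions apply.

/* Sketch: set the destination and user context of one Rx queue. */
static int example_set_rx_queue(struct fsl_mc_io *mc_io, u16 token,
				u16 dpio_id, u64 ctx)
{
	struct dpni_queue queue = { 0 };

	queue.destination.type = DPNI_DEST_DPIO;
	queue.destination.id = dpio_id;
	queue.destination.priority = 0;
	queue.user_context = ctx;

	return dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
			      DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX,
			      &queue);
}
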
9061 +
9062 +/**
9063 + * enum dpni_congestion_unit - DPNI congestion units
9064 + * @DPNI_CONGESTION_UNIT_BYTES: bytes units
9065 + * @DPNI_CONGESTION_UNIT_FRAMES: frames units
9066 + */
9067 +enum dpni_congestion_unit {
9068 + DPNI_CONGESTION_UNIT_BYTES = 0,
9069 + DPNI_CONGESTION_UNIT_FRAMES
9070 +};
9071 +
9072 +/**
9073 + * enum dpni_congestion_point - Structure representing congestion point
9074 + * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
9075 + * QUEUE_INDEX
9076 + * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
9077 + * define the DPNI this can be either per TC (default) or per
9078 + * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
9079 + * QUEUE_INDEX is ignored if this type is used.
9080 + */
9081 +enum dpni_congestion_point {
9082 + DPNI_CP_QUEUE,
9083 + DPNI_CP_GROUP,
9084 +};
9085 +
9086 +/**
9087 + * struct dpni_dest_cfg - Structure representing DPNI destination parameters
9088 + * @dest_type: Destination type
9089 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
9090 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
9091 + * are 0-1 or 0-7, depending on the number of priorities in that
9092 + * channel; not relevant for 'DPNI_DEST_NONE' option
9093 + */
9094 +struct dpni_dest_cfg {
9095 + enum dpni_dest dest_type;
9096 + int dest_id;
9097 + u8 priority;
9098 +};
9099 +
9100 +/* DPNI congestion options */
9101 +
9102 +/**
9103 + * CSCN message is written to message_iova once entering a
9104 + * congestion state (see 'threshold_entry')
9105 + */
9106 +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
9107 +/**
9108 + * CSCN message is written to message_iova once exiting a
9109 + * congestion state (see 'threshold_exit')
9110 + */
9111 +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
9112 +/**
9113 + * CSCN write will attempt to allocate into a cache (coherent write);
9114 + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
9115 + */
9116 +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
9117 +/**
9118 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
9119 + * DPIO/DPCON's WQ channel once entering a congestion state
9120 + * (see 'threshold_entry')
9121 + */
9122 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
9123 +/**
9124 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
9125 + * DPIO/DPCON's WQ channel once exiting a congestion state
9126 + * (see 'threshold_exit')
9127 + */
9128 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
9129 +/**
9130 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
9131 + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
9132 + */
9133 +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
9134 +
9135 +/**
9136 + * struct dpni_congestion_notification_cfg - congestion notification
9137 + * configuration
9138 + * @units: units type
9139 + * @threshold_entry: above this threshold we enter a congestion state;
9140 + *	set it to '0' to disable it
9141 + * @threshold_exit: below this threshold we exit the congestion state.
9142 + * @message_ctx: The context that will be part of the CSCN message
9143 + * @message_iova: I/O virtual address (must be in DMA-able memory),
9144 + * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
9145 + * contained in 'options'
9146 + * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel
9147 + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
9148 + */
9149 +
9150 +struct dpni_congestion_notification_cfg {
9151 + enum dpni_congestion_unit units;
9152 + u32 threshold_entry;
9153 + u32 threshold_exit;
9154 + u64 message_ctx;
9155 + u64 message_iova;
9156 + struct dpni_dest_cfg dest_cfg;
9157 + u16 notification_mode;
9158 +};
9159 +
9160 +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
9161 + u32 cmd_flags,
9162 + u16 token,
9163 + enum dpni_queue_type qtype,
9164 + u8 tc_id,
9165 + const struct dpni_congestion_notification_cfg *cfg);
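
Editorial note (not part of the patch): a sketch of configuring memory-based congestion notification for the Rx queues of TC 0. cscn_iova is assumed to be a 16-byte-aligned, DMA-mapped address owned by the caller; the byte thresholds and the helper name are illustrative.

/* Sketch: write a CSCN to memory on congestion entry and exit. */
static int example_set_cscn(struct fsl_mc_io *mc_io, u16 token, u64 cscn_iova)
{
	struct dpni_congestion_notification_cfg cfg = { 0 };

	cfg.units = DPNI_CONGESTION_UNIT_BYTES;
	cfg.threshold_entry = 64 * 1024; /* illustrative thresholds */
	cfg.threshold_exit = 32 * 1024;
	cfg.message_iova = cscn_iova;
	cfg.message_ctx = 0;
	cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
	cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
				DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;

	return dpni_set_congestion_notification(mc_io, 0, token,
						DPNI_QUEUE_RX, 0, &cfg);
}
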
9166 +
9167 +/**
9168 + * struct dpni_taildrop - Structure representing the taildrop
9169 + * @enable: Indicates whether the taildrop is active or not.
9170 + * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
9171 + *	byte units; this field is ignored and assumed to be 0 if
9172 + *	CONGESTION_POINT is 0.
9173 + * @threshold: Threshold value, in units identified by UNITS field. Value 0
9174 + * cannot be used as a valid taildrop threshold, THRESHOLD must
9175 + * be > 0 if the taildrop is enabled.
9176 + */
9177 +struct dpni_taildrop {
9178 + char enable;
9179 + enum dpni_congestion_unit units;
9180 + u32 threshold;
9181 +};
9182 +
9183 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
9184 + u32 cmd_flags,
9185 + u16 token,
9186 + enum dpni_congestion_point cg_point,
9187 + enum dpni_queue_type q_type,
9188 + u8 tc,
9189 + u8 q_index,
9190 + struct dpni_taildrop *taildrop);
9191 +
9192 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
9193 + u32 cmd_flags,
9194 + u16 token,
9195 + enum dpni_congestion_point cg_point,
9196 + enum dpni_queue_type q_type,
9197 + u8 tc,
9198 + u8 q_index,
9199 + struct dpni_taildrop *taildrop);
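
Editorial note (not part of the patch): a sketch of arming a per-queue taildrop on Rx queue 0 of TC 0; the 64 KiB threshold and the helper name are illustrative, and the usual mc_io/token assumptions apply.

/* Sketch: enable a 64 KiB byte-count taildrop on one Rx queue. */
static int example_set_taildrop(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_taildrop td = {
		.enable = 1,
		.units = DPNI_CONGESTION_UNIT_BYTES,
		.threshold = 64 * 1024,
	};

	return dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
				 DPNI_QUEUE_RX, 0, 0, &td);
}
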
9200 +
9201 +/**
9202 + * struct dpni_rule_cfg - Rule configuration for table lookup
9203 + * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
9204 + * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
9205 + * @key_size: key and mask size (in bytes)
9206 + */
9207 +struct dpni_rule_cfg {
9208 + u64 key_iova;
9209 + u64 mask_iova;
9210 + u8 key_size;
9211 +};
9212 +
9213 +/**
9214 + * Discard matching traffic. If set, this takes precedence over any other
9215 + * configuration and matching traffic is always discarded.
9216 + */
9217 +#define DPNI_FS_OPT_DISCARD 0x1
9218 +
9219 +/**
9220 + * Set FLC value. If set, the flc member of struct dpni_fs_action_cfg is used
9221 + * override the FLC value set per queue.
9222 + * For more details check the Frame Descriptor section in the hardware
9223 + * documentation.
9224 + */
9225 +#define DPNI_FS_OPT_SET_FLC 0x2
9226 +
9227 +/*
9228 + * Indicates whether the 6 least significant bits of FLC are used for stash
9229 + * control. If set, the 6 least significant bits in value are interpreted as
9230 + * follows:
9231 + * - bits 0-1: indicates the number of 64 byte units of context that are
9232 + * stashed. FLC value is interpreted as a memory address in this case,
9233 + * excluding the 6 LS bits.
9234 + * - bits 2-3: indicates the number of 64 byte units of frame annotation
9235 + * to be stashed. Annotation is placed at FD[ADDR].
9236 + * - bits 4-5: indicates the number of 64 byte units of frame data to be
9237 + * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
9238 + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
9239 + */
9240 +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
9241 +
9242 +/**
9243 + * struct dpni_fs_action_cfg - Action configuration for table look-up
9244 + * @flc: FLC value for traffic matching this rule. Please check the Frame
9245 + * Descriptor section in the hardware documentation for more information.
9246 + * @flow_id: Identifies the Rx queue used for matching traffic. Supported
9247 + * values are in the range 0 to num_queues - 1.
9248 + * @options: Any combination of DPNI_FS_OPT_ values.
9249 + */
9250 +struct dpni_fs_action_cfg {
9251 + u64 flc;
9252 + u16 flow_id;
9253 + u16 options;
9254 +};
9255 +
9256 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
9257 + u32 cmd_flags,
9258 + u16 token,
9259 + u8 tc_id,
9260 + u16 index,
9261 + const struct dpni_rule_cfg *cfg,
9262 + const struct dpni_fs_action_cfg *action);
9263 +
9264 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
9265 + u32 cmd_flags,
9266 + u16 token,
9267 + u8 tc_id,
9268 + const struct dpni_rule_cfg *cfg);
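
Editorial note (not part of the patch): a sketch of adding a flow-steering rule that sends matching traffic to Rx queue 2 of TC 0. key_iova and mask_iova are assumed to be DMA-able buffers already laid out according to the key composition configured for this DPNI (not shown); the helper name, table index and flow id are illustrative.

/* Sketch: steer traffic matching a pre-built key/mask pair to queue 2. */
static int example_add_fs_rule(struct fsl_mc_io *mc_io, u16 token,
			       u64 key_iova, u64 mask_iova, u8 key_size)
{
	struct dpni_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};
	struct dpni_fs_action_cfg action = {
		.flow_id = 2,
		.options = 0,
	};

	return dpni_add_fs_entry(mc_io, 0, token, 0, 0, &rule, &action);
}
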
9269 +
9270 +#endif /* __FSL_DPNI_H */
9271 diff --git a/drivers/staging/fsl-dpaa2/ethernet/net.h b/drivers/staging/fsl-dpaa2/ethernet/net.h
9272 new file mode 100644
9273 index 00000000..5020dee1
9274 --- /dev/null
9275 +++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
9276 @@ -0,0 +1,480 @@
9277 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
9278 + *
9279 + * Redistribution and use in source and binary forms, with or without
9280 + * modification, are permitted provided that the following conditions are met:
9281 + * * Redistributions of source code must retain the above copyright
9282 + * notice, this list of conditions and the following disclaimer.
9283 + * * Redistributions in binary form must reproduce the above copyright
9284 + * notice, this list of conditions and the following disclaimer in the
9285 + * documentation and/or other materials provided with the distribution.
9286 + * * Neither the name of the above-listed copyright holders nor the
9287 + * names of any contributors may be used to endorse or promote products
9288 + * derived from this software without specific prior written permission.
9289 + *
9290 + *
9291 + * ALTERNATIVELY, this software may be distributed under the terms of the
9292 + * GNU General Public License ("GPL") as published by the Free Software
9293 + * Foundation, either version 2 of that License or (at your option) any
9294 + * later version.
9295 + *
9296 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9297 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9298 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9299 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9300 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9301 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9302 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9303 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9304 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9305 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9306 + * POSSIBILITY OF SUCH DAMAGE.
9307 + */
9308 +#ifndef __FSL_NET_H
9309 +#define __FSL_NET_H
9310 +
9311 +#define LAST_HDR_INDEX 0xFFFFFFFF
9312 +
9313 +/*****************************************************************************/
9314 +/* Protocol fields */
9315 +/*****************************************************************************/
9316 +
9317 +/************************* Ethernet fields *********************************/
9318 +#define NH_FLD_ETH_DA (1)
9319 +#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
9320 +#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
9321 +#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
9322 +#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
9323 +#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
9324 +#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
9325 +
9326 +#define NH_FLD_ETH_ADDR_SIZE 6
9327 +
9328 +/*************************** VLAN fields ***********************************/
9329 +#define NH_FLD_VLAN_VPRI (1)
9330 +#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
9331 +#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
9332 +#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
9333 +#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
9334 +#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
9335 +
9336 +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
9337 + NH_FLD_VLAN_CFI | \
9338 + NH_FLD_VLAN_VID)
9339 +
9340 +/************************ IP (generic) fields ******************************/
9341 +#define NH_FLD_IP_VER (1)
9342 +#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
9343 +#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
9344 +#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
9345 +#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
9346 +#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
9347 +#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
9348 +#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
9349 +#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
9350 +
9351 +#define NH_FLD_IP_PROTO_SIZE 1
9352 +
9353 +/***************************** IPV4 fields *********************************/
9354 +#define NH_FLD_IPV4_VER (1)
9355 +#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
9356 +#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
9357 +#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
9358 +#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
9359 +#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
9360 +#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
9361 +#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
9362 +#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
9363 +#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
9364 +#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
9365 +#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
9366 +#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
9367 +#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
9368 +#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
9369 +#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
9370 +
9371 +#define NH_FLD_IPV4_ADDR_SIZE 4
9372 +#define NH_FLD_IPV4_PROTO_SIZE 1
9373 +
9374 +/***************************** IPV6 fields *********************************/
9375 +#define NH_FLD_IPV6_VER (1)
9376 +#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
9377 +#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
9378 +#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
9379 +#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
9380 +#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
9381 +#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
9382 +#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
9383 +#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
9384 +
9385 +#define NH_FLD_IPV6_ADDR_SIZE 16
9386 +#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
9387 +
9388 +/***************************** ICMP fields *********************************/
9389 +#define NH_FLD_ICMP_TYPE (1)
9390 +#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
9391 +#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
9392 +#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
9393 +#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
9394 +#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
9395 +
9396 +#define NH_FLD_ICMP_CODE_SIZE 1
9397 +#define NH_FLD_ICMP_TYPE_SIZE 1
9398 +
9399 +/***************************** IGMP fields *********************************/
9400 +#define NH_FLD_IGMP_VERSION (1)
9401 +#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
9402 +#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
9403 +#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
9404 +#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
9405 +
9406 +/***************************** TCP fields **********************************/
9407 +#define NH_FLD_TCP_PORT_SRC (1)
9408 +#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
9409 +#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
9410 +#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
9411 +#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
9412 +#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
9413 +#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
9414 +#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
9415 +#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
9416 +#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
9417 +#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
9418 +#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
9419 +
9420 +#define NH_FLD_TCP_PORT_SIZE 2
9421 +
9422 +/***************************** UDP fields **********************************/
9423 +#define NH_FLD_UDP_PORT_SRC (1)
9424 +#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
9425 +#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
9426 +#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
9427 +#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
9428 +
9429 +#define NH_FLD_UDP_PORT_SIZE 2
9430 +
9431 +/*************************** UDP-lite fields *******************************/
9432 +#define NH_FLD_UDP_LITE_PORT_SRC (1)
9433 +#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
9434 +#define NH_FLD_UDP_LITE_ALL_FIELDS \
9435 + ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
9436 +
9437 +#define NH_FLD_UDP_LITE_PORT_SIZE 2
9438 +
9439 +/*************************** UDP-encap-ESP fields **************************/
9440 +#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
9441 +#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
9442 +#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
9443 +#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
9444 +#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
9445 +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
9446 +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
9447 + ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
9448 +
9449 +#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
9450 +#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
9451 +
9452 +/***************************** SCTP fields *********************************/
9453 +#define NH_FLD_SCTP_PORT_SRC (1)
9454 +#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
9455 +#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
9456 +#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
9457 +#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
9458 +
9459 +#define NH_FLD_SCTP_PORT_SIZE 2
9460 +
9461 +/***************************** DCCP fields *********************************/
9462 +#define NH_FLD_DCCP_PORT_SRC (1)
9463 +#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
9464 +#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
9465 +
9466 +#define NH_FLD_DCCP_PORT_SIZE 2
9467 +
9468 +/***************************** IPHC fields *********************************/
9469 +#define NH_FLD_IPHC_CID (1)
9470 +#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
9471 +#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
9472 +#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
9473 +#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
9474 +#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
9475 +
9476 +/************************ SCTP chunk data fields ***************************/
9477 +#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
9478 +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
9479 +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
9480 +#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
9481 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
9482 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
9483 +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
9484 +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
9485 +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
9486 +#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
9487 +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
9488 + ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
9489 +
9490 +/*************************** L2TPV2 fields *********************************/
9491 +#define NH_FLD_L2TPV2_TYPE_BIT (1)
9492 +#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
9493 +#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
9494 +#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
9495 +#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
9496 +#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
9497 +#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
9498 +#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
9499 +#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
9500 +#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
9501 +#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
9502 +#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
9503 +#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
9504 +#define NH_FLD_L2TPV2_ALL_FIELDS \
9505 + ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
9506 +
9507 +/*************************** L2TPV3 fields *********************************/
9508 +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
9509 +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
9510 +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
9511 +#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
9512 +#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
9513 +#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
9514 +#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
9515 +#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
9516 +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
9517 +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
9518 + ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
9519 +
9520 +#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
9521 +#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
9522 +#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
9523 +#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
9524 +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
9525 + ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
9526 +
9527 +/**************************** PPP fields ***********************************/
9528 +#define NH_FLD_PPP_PID (1)
9529 +#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
9530 +#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
9531 +
9532 +/************************** PPPoE fields ***********************************/
9533 +#define NH_FLD_PPPOE_VER (1)
9534 +#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
9535 +#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
9536 +#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
9537 +#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
9538 +#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
9539 +#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
9540 +#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
9541 +
9542 +/************************* PPP-Mux fields **********************************/
9543 +#define NH_FLD_PPPMUX_PID (1)
9544 +#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
9545 +#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
9546 +#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
9547 +
9548 +/*********************** PPP-Mux sub-frame fields **************************/
9549 +#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
9550 +#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
9551 +#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
9552 +#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
9553 +#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
9554 +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
9555 + ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
9556 +
9557 +/*************************** LLC fields ************************************/
9558 +#define NH_FLD_LLC_DSAP (1)
9559 +#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
9560 +#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
9561 +#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
9562 +
9563 +/*************************** NLPID fields **********************************/
9564 +#define NH_FLD_NLPID_NLPID (1)
9565 +#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
9566 +
9567 +/*************************** SNAP fields ***********************************/
9568 +#define NH_FLD_SNAP_OUI (1)
9569 +#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
9570 +#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
9571 +
9572 +/*************************** LLC SNAP fields *******************************/
9573 +#define NH_FLD_LLC_SNAP_TYPE (1)
9574 +#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
9575 +
9576 +#define NH_FLD_ARP_HTYPE (1)
9577 +#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
9578 +#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
9579 +#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
9580 +#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
9581 +#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
9582 +#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
9583 +#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
9584 +#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
9585 +#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
9586 +
9587 +/*************************** RFC2684 fields ********************************/
9588 +#define NH_FLD_RFC2684_LLC (1)
9589 +#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
9590 +#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
9591 +#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
9592 +#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
9593 +#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
9594 +#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
9595 +
9596 +/*************************** User defined fields ***************************/
9597 +#define NH_FLD_USER_DEFINED_SRCPORT (1)
9598 +#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
9599 +#define NH_FLD_USER_DEFINED_ALL_FIELDS \
9600 + ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
9601 +
9602 +/*************************** Payload fields ********************************/
9603 +#define NH_FLD_PAYLOAD_BUFFER (1)
9604 +#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
9605 +#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
9606 +#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
9607 +#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
9608 +#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
9609 +#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
9610 +
9611 +/*************************** GRE fields ************************************/
9612 +#define NH_FLD_GRE_TYPE (1)
9613 +#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
9614 +
9615 +/*************************** MINENCAP fields *******************************/
9616 +#define NH_FLD_MINENCAP_SRC_IP (1)
9617 +#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
9618 +#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
9619 +#define NH_FLD_MINENCAP_ALL_FIELDS \
9620 + ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
9621 +
9622 +/*************************** IPSEC AH fields *******************************/
9623 +#define NH_FLD_IPSEC_AH_SPI (1)
9624 +#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
9625 +#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
9626 +
9627 +/*************************** IPSEC ESP fields ******************************/
9628 +#define NH_FLD_IPSEC_ESP_SPI (1)
9629 +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
9630 +#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
9631 +
9632 +#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
9633 +
9634 +/*************************** MPLS fields ***********************************/
9635 +#define NH_FLD_MPLS_LABEL_STACK (1)
9636 +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
9637 + ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
9638 +
9639 +/*************************** MACSEC fields *********************************/
9640 +#define NH_FLD_MACSEC_SECTAG (1)
9641 +#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
9642 +
9643 +/*************************** GTP fields ************************************/
9644 +#define NH_FLD_GTP_TEID (1)
9645 +
9646 +/* Protocol options */
9647 +
9648 +/* Ethernet options */
9649 +#define NH_OPT_ETH_BROADCAST 1
9650 +#define NH_OPT_ETH_MULTICAST 2
9651 +#define NH_OPT_ETH_UNICAST 3
9652 +#define NH_OPT_ETH_BPDU 4
9653 +
9654 +#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
9655 +/* also applicable for broadcast */
9656 +
9657 +/* VLAN options */
9658 +#define NH_OPT_VLAN_CFI 1
9659 +
9660 +/* IPV4 options */
9661 +#define NH_OPT_IPV4_UNICAST 1
9662 +#define NH_OPT_IPV4_MULTICAST 2
9663 +#define NH_OPT_IPV4_BROADCAST 3
9664 +#define NH_OPT_IPV4_OPTION 4
9665 +#define NH_OPT_IPV4_FRAG 5
9666 +#define NH_OPT_IPV4_INITIAL_FRAG 6
9667 +
9668 +/* IPV6 options */
9669 +#define NH_OPT_IPV6_UNICAST 1
9670 +#define NH_OPT_IPV6_MULTICAST 2
9671 +#define NH_OPT_IPV6_OPTION 3
9672 +#define NH_OPT_IPV6_FRAG 4
9673 +#define NH_OPT_IPV6_INITIAL_FRAG 5
9674 +
9675 +/* General IP options (may be used for any version) */
9676 +#define NH_OPT_IP_FRAG 1
9677 +#define NH_OPT_IP_INITIAL_FRAG 2
9678 +#define NH_OPT_IP_OPTION 3
9679 +
9680 +/* Minenc. options */
9681 +#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
9682 +
9683 +/* GRE. options */
9684 +#define NH_OPT_GRE_ROUTING_PRESENT 1
9685 +
9686 +/* TCP options */
9687 +#define NH_OPT_TCP_OPTIONS 1
9688 +#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
9689 +#define NH_OPT_TCP_CONTROL_LOW_BITS 3
9690 +
9691 +/* CAPWAP options */
9692 +#define NH_OPT_CAPWAP_DTLS 1
9693 +
9694 +enum net_prot {
9695 + NET_PROT_NONE = 0,
9696 + NET_PROT_PAYLOAD,
9697 + NET_PROT_ETH,
9698 + NET_PROT_VLAN,
9699 + NET_PROT_IPV4,
9700 + NET_PROT_IPV6,
9701 + NET_PROT_IP,
9702 + NET_PROT_TCP,
9703 + NET_PROT_UDP,
9704 + NET_PROT_UDP_LITE,
9705 + NET_PROT_IPHC,
9706 + NET_PROT_SCTP,
9707 + NET_PROT_SCTP_CHUNK_DATA,
9708 + NET_PROT_PPPOE,
9709 + NET_PROT_PPP,
9710 + NET_PROT_PPPMUX,
9711 + NET_PROT_PPPMUX_SUBFRM,
9712 + NET_PROT_L2TPV2,
9713 + NET_PROT_L2TPV3_CTRL,
9714 + NET_PROT_L2TPV3_SESS,
9715 + NET_PROT_LLC,
9716 + NET_PROT_LLC_SNAP,
9717 + NET_PROT_NLPID,
9718 + NET_PROT_SNAP,
9719 + NET_PROT_MPLS,
9720 + NET_PROT_IPSEC_AH,
9721 + NET_PROT_IPSEC_ESP,
9722 + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
9723 + NET_PROT_MACSEC,
9724 + NET_PROT_GRE,
9725 + NET_PROT_MINENCAP,
9726 + NET_PROT_DCCP,
9727 + NET_PROT_ICMP,
9728 + NET_PROT_IGMP,
9729 + NET_PROT_ARP,
9730 + NET_PROT_CAPWAP_DATA,
9731 + NET_PROT_CAPWAP_CTRL,
9732 + NET_PROT_RFC2684,
9733 + NET_PROT_ICMPV6,
9734 + NET_PROT_FCOE,
9735 + NET_PROT_FIP,
9736 + NET_PROT_ISCSI,
9737 + NET_PROT_GTP,
9738 + NET_PROT_USER_DEFINED_L2,
9739 + NET_PROT_USER_DEFINED_L3,
9740 + NET_PROT_USER_DEFINED_L4,
9741 + NET_PROT_USER_DEFINED_L5,
9742 + NET_PROT_USER_DEFINED_SHIM1,
9743 + NET_PROT_USER_DEFINED_SHIM2,
9744 +
9745 + NET_PROT_DUMMY_LAST
9746 +};
9747 +
9748 +/*! IEEE8021.Q */
9749 +#define NH_IEEE8021Q_ETYPE 0x8100
9750 +#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
9751 + ((((u32)((etype) & 0xFFFF)) << 16) | \
9752 + (((u32)((pcp) & 0x07)) << 13) | \
9753 + (((u32)((dei) & 0x01)) << 12) | \
9754 + (((u32)((vlan_id) & 0xFFF))))
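
Editorial note (not part of the patch): a worked expansion of the 802.1Q header-packing macro above; the ethertype lands in bits 31-16, PCP in bits 15-13, DEI in bit 12 and the VLAN ID in bits 11-0. The PCP/DEI/VID values are illustrative.

/* NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 5, 0, 100)
 *   = (0x8100 << 16) | (5 << 13) | (0 << 12) | 100
 *   = 0x81000000 | 0x0000A000 | 0x00000000 | 0x00000064
 *   = 0x8100A064
 */
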
9755 +
9756 +#endif /* __FSL_NET_H */
9757 diff --git a/drivers/staging/fsl-dpaa2/ethsw/Kconfig b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
9758 new file mode 100644
9759 index 00000000..06c70408
9760 --- /dev/null
9761 +++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
9762 @@ -0,0 +1,6 @@
9763 +config FSL_DPAA2_ETHSW
9764 + tristate "DPAA2 Ethernet Switch"
9765 + depends on FSL_MC_BUS && FSL_DPAA2
9766 + default y
9767 + ---help---
9768 + Prototype driver for DPAA2 Ethernet Switch.
9769 diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile
9770 new file mode 100644
9771 index 00000000..20eb3ac4
9772 --- /dev/null
9773 +++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
9774 @@ -0,0 +1,10 @@
9775 +
9776 +obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
9777 +
9778 +dpaa2-ethsw-objs := switch.o dpsw.o
9779 +
9780 +all:
9781 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
9782 +
9783 +clean:
9784 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
9785 diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
9786 new file mode 100644
9787 index 00000000..f7374d1c
9788 --- /dev/null
9789 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
9790 @@ -0,0 +1,851 @@
9791 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
9792 + *
9793 + * Redistribution and use in source and binary forms, with or without
9794 + * modification, are permitted provided that the following conditions are met:
9795 + * * Redistributions of source code must retain the above copyright
9796 + * notice, this list of conditions and the following disclaimer.
9797 + * * Redistributions in binary form must reproduce the above copyright
9798 + * notice, this list of conditions and the following disclaimer in the
9799 + * documentation and/or other materials provided with the distribution.
9800 + * * Neither the name of the above-listed copyright holders nor the
9801 + * names of any contributors may be used to endorse or promote products
9802 + * derived from this software without specific prior written permission.
9803 + *
9804 + *
9805 + * ALTERNATIVELY, this software may be distributed under the terms of the
9806 + * GNU General Public License ("GPL") as published by the Free Software
9807 + * Foundation, either version 2 of that License or (at your option) any
9808 + * later version.
9809 + *
9810 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9811 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9812 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9813 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9814 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9815 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9816 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9817 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9818 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9819 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9820 + * POSSIBILITY OF SUCH DAMAGE.
9821 + */
9822 +#ifndef __FSL_DPSW_CMD_H
9823 +#define __FSL_DPSW_CMD_H
9824 +
9825 +/* DPSW Version */
9826 +#define DPSW_VER_MAJOR 8
9827 +#define DPSW_VER_MINOR 0
9828 +
9829 +#define DPSW_CMD_BASE_VERSION 1
9830 +#define DPSW_CMD_ID_OFFSET 4
9831 +
9832 +#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
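
Editorial note (not part of the patch): a worked expansion of the command-ID encoding above; the command number is shifted left by DPSW_CMD_ID_OFFSET and the base command-format version occupies the low bits.

/* DPSW_CMDID_OPEN = DPSW_CMD_ID(0x802)
 *                 = (0x802 << 4) | 1
 *                 = 0x8021
 */
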
9833 +
9834 +/* Command IDs */
9835 +#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
9836 +#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
9837 +
9838 +#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
9839 +
9840 +#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
9841 +#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
9842 +#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
9843 +#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
9844 +#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006)
9845 +
9846 +#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010)
9847 +#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011)
9848 +#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
9849 +#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013)
9850 +#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
9851 +#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015)
9852 +#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
9853 +#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
9854 +
9855 +#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
9856 +
9857 +#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024)
9858 +
9859 +#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026)
9860 +
9861 +#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
9862 +#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
9863 +#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032)
9864 +#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033)
9865 +#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
9866 +#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035)
9867 +#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036)
9868 +#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
9869 +#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
9870 +#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039)
9871 +#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A)
9872 +#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B)
9873 +
9874 +#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
9875 +#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
9876 +
9877 +#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
9878 +
9879 +#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
9880 +#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x045)
9881 +#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
9882 +#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
9883 +#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
9884 +#define DPSW_CMDID_IF_SET_MULTICAST DPSW_CMD_ID(0x049)
9885 +#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
9886 +
9887 +#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
9888 +
9889 +#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
9890 +#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
9891 +#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
9892 +#define DPSW_CMDID_VLAN_ADD_IF_FLOODING DPSW_CMD_ID(0x063)
9893 +#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
9894 +#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
9895 +#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
9896 +#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
9897 +#define DPSW_CMDID_VLAN_GET_IF DPSW_CMD_ID(0x068)
9898 +#define DPSW_CMDID_VLAN_GET_IF_FLOODING DPSW_CMD_ID(0x069)
9899 +#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED DPSW_CMD_ID(0x06A)
9900 +#define DPSW_CMDID_VLAN_GET_ATTRIBUTES DPSW_CMD_ID(0x06B)
9901 +
9902 +#define DPSW_CMDID_FDB_GET_MULTICAST DPSW_CMD_ID(0x080)
9903 +#define DPSW_CMDID_FDB_GET_UNICAST DPSW_CMD_ID(0x081)
9904 +#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
9905 +#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
9906 +#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
9907 +#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
9908 +#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
9909 +#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
9910 +#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
9911 +#define DPSW_CMDID_FDB_GET_ATTR DPSW_CMD_ID(0x089)
9912 +
9913 +#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
9914 +#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
9915 +#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
9916 +#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
9917 +#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
9918 +#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
9919 +#define DPSW_CMDID_ACL_GET_ATTR DPSW_CMD_ID(0x096)
9920 +
9921 +#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
9922 +#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
9923 +#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
9924 +#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
9925 +
9926 +/* Macros for accessing command fields smaller than 1 byte */
9927 +#define DPSW_MASK(field) \
9928 + GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
9929 + DPSW_##field##_SHIFT)
9930 +#define dpsw_set_field(var, field, val) \
9931 + ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
9932 +#define dpsw_get_field(var, field) \
9933 + (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
9934 +#define dpsw_get_bit(var, bit) \
9935 + (((var) >> (bit)) & GENMASK(0, 0))
9936 +
9937 +static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val)
9938 +{
9939 + var |= (u64)val << bit & GENMASK(bit, bit);
9940 + return var;
9941 +}
9942 +
9943 +struct dpsw_cmd_open {
9944 + __le32 dpsw_id;
9945 +};
9946 +
9947 +#define DPSW_COMPONENT_TYPE_SHIFT 0
9948 +#define DPSW_COMPONENT_TYPE_SIZE 4
9949 +
9950 +struct dpsw_cmd_create {
9951 + /* cmd word 0 */
9952 + __le16 num_ifs;
9953 + u8 max_fdbs;
9954 + u8 max_meters_per_if;
9955 + /* from LSB: only the first 4 bits */
9956 + u8 component_type;
9957 + u8 pad[3];
9958 + /* cmd word 1 */
9959 + __le16 max_vlans;
9960 + __le16 max_fdb_entries;
9961 + __le16 fdb_aging_time;
9962 + __le16 max_fdb_mc_groups;
9963 + /* cmd word 2 */
9964 + __le64 options;
9965 +};
9966 +
9967 +struct dpsw_cmd_destroy {
9968 + __le32 dpsw_id;
9969 +};
9970 +
9971 +#define DPSW_ENABLE_SHIFT 0
9972 +#define DPSW_ENABLE_SIZE 1
9973 +
9974 +struct dpsw_rsp_is_enabled {
9975 + /* from LSB: enable:1 */
9976 + u8 enabled;
9977 +};
9978 +
9979 +struct dpsw_cmd_set_irq {
9980 + /* cmd word 0 */
9981 + u8 irq_index;
9982 + u8 pad[3];
9983 + __le32 irq_val;
9984 + /* cmd word 1 */
9985 + __le64 irq_addr;
9986 + /* cmd word 2 */
9987 + __le32 irq_num;
9988 +};
9989 +
9990 +struct dpsw_cmd_get_irq {
9991 + __le32 pad;
9992 + u8 irq_index;
9993 +};
9994 +
9995 +struct dpsw_rsp_get_irq {
9996 + /* cmd word 0 */
9997 + __le32 irq_val;
9998 + __le32 pad;
9999 + /* cmd word 1 */
10000 + __le64 irq_addr;
10001 + /* cmd word 2 */
10002 + __le32 irq_num;
10003 + __le32 irq_type;
10004 +};
10005 +
10006 +struct dpsw_cmd_set_irq_enable {
10007 + u8 enable_state;
10008 + u8 pad[3];
10009 + u8 irq_index;
10010 +};
10011 +
10012 +struct dpsw_cmd_get_irq_enable {
10013 + __le32 pad;
10014 + u8 irq_index;
10015 +};
10016 +
10017 +struct dpsw_rsp_get_irq_enable {
10018 + u8 enable_state;
10019 +};
10020 +
10021 +struct dpsw_cmd_set_irq_mask {
10022 + __le32 mask;
10023 + u8 irq_index;
10024 +};
10025 +
10026 +struct dpsw_cmd_get_irq_mask {
10027 + __le32 pad;
10028 + u8 irq_index;
10029 +};
10030 +
10031 +struct dpsw_rsp_get_irq_mask {
10032 + __le32 mask;
10033 +};
10034 +
10035 +struct dpsw_cmd_get_irq_status {
10036 + __le32 status;
10037 + u8 irq_index;
10038 +};
10039 +
10040 +struct dpsw_rsp_get_irq_status {
10041 + __le32 status;
10042 +};
10043 +
10044 +struct dpsw_cmd_clear_irq_status {
10045 + __le32 status;
10046 + u8 irq_index;
10047 +};
10048 +
10049 +#define DPSW_COMPONENT_TYPE_SHIFT 0
10050 +#define DPSW_COMPONENT_TYPE_SIZE 4
10051 +
10052 +struct dpsw_rsp_get_attr {
10053 + /* cmd word 0 */
10054 + __le16 num_ifs;
10055 + u8 max_fdbs;
10056 + u8 num_fdbs;
10057 + __le16 max_vlans;
10058 + __le16 num_vlans;
10059 + /* cmd word 1 */
10060 + __le16 max_fdb_entries;
10061 + __le16 fdb_aging_time;
10062 + __le32 dpsw_id;
10063 + /* cmd word 2 */
10064 + __le16 mem_size;
10065 + __le16 max_fdb_mc_groups;
10066 + u8 max_meters_per_if;
10067 +	/* from LSB: only the first 4 bits */
10068 + u8 component_type;
10069 + __le16 pad;
10070 + /* cmd word 3 */
10071 + __le64 options;
10072 +};
10073 +
10074 +struct dpsw_cmd_set_reflection_if {
10075 + __le16 if_id;
10076 +};
10077 +
10078 +struct dpsw_cmd_if_set_flooding {
10079 + __le16 if_id;
10080 + /* from LSB: enable:1 */
10081 + u8 enable;
10082 +};
10083 +
10084 +struct dpsw_cmd_if_set_broadcast {
10085 + __le16 if_id;
10086 + /* from LSB: enable:1 */
10087 + u8 enable;
10088 +};
10089 +
10090 +struct dpsw_cmd_if_set_multicast {
10091 + __le16 if_id;
10092 + /* from LSB: enable:1 */
10093 + u8 enable;
10094 +};
10095 +
10096 +#define DPSW_VLAN_ID_SHIFT 0
10097 +#define DPSW_VLAN_ID_SIZE 12
10098 +#define DPSW_DEI_SHIFT 12
10099 +#define DPSW_DEI_SIZE 1
10100 +#define DPSW_PCP_SHIFT 13
10101 +#define DPSW_PCP_SIZE 3
10102 +
10103 +struct dpsw_cmd_if_set_tci {
10104 + __le16 if_id;
10105 + /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
10106 + __le16 conf;
10107 +};
10108 +
10109 +struct dpsw_cmd_if_get_tci {
10110 + __le16 if_id;
10111 +};
10112 +
10113 +struct dpsw_rsp_if_get_tci {
10114 + __le16 pad;
10115 + __le16 vlan_id;
10116 + u8 dei;
10117 + u8 pcp;
10118 +};
10119 +
10120 +#define DPSW_STATE_SHIFT 0
10121 +#define DPSW_STATE_SIZE 4
10122 +
10123 +struct dpsw_cmd_if_set_stp {
10124 + __le16 if_id;
10125 + __le16 vlan_id;
10126 +	/* only the first 4 bits from LSB */
10127 + u8 state;
10128 +};
10129 +
10130 +#define DPSW_FRAME_TYPE_SHIFT 0
10131 +#define DPSW_FRAME_TYPE_SIZE 4
10132 +#define DPSW_UNACCEPTED_ACT_SHIFT 4
10133 +#define DPSW_UNACCEPTED_ACT_SIZE 4
10134 +
10135 +struct dpsw_cmd_if_set_accepted_frames {
10136 + __le16 if_id;
10137 + /* from LSB: type:4 unaccepted_act:4 */
10138 + u8 unaccepted;
10139 +};
10140 +
10141 +#define DPSW_ACCEPT_ALL_SHIFT 0
10142 +#define DPSW_ACCEPT_ALL_SIZE 1
10143 +
10144 +struct dpsw_cmd_if_set_accept_all_vlan {
10145 + __le16 if_id;
10146 + /* only the least significant bit */
10147 + u8 accept_all;
10148 +};
10149 +
10150 +#define DPSW_COUNTER_TYPE_SHIFT 0
10151 +#define DPSW_COUNTER_TYPE_SIZE 5
10152 +
10153 +struct dpsw_cmd_if_get_counter {
10154 + __le16 if_id;
10155 + /* from LSB: type:5 */
10156 + u8 type;
10157 +};
10158 +
10159 +struct dpsw_rsp_if_get_counter {
10160 + __le64 pad;
10161 + __le64 counter;
10162 +};
10163 +
10164 +struct dpsw_cmd_if_set_counter {
10165 + /* cmd word 0 */
10166 + __le16 if_id;
10167 + /* from LSB: type:5 */
10168 + u8 type;
10169 + /* cmd word 1 */
10170 + __le64 counter;
10171 +};
10172 +
10173 +#define DPSW_PRIORITY_SELECTOR_SHIFT 0
10174 +#define DPSW_PRIORITY_SELECTOR_SIZE 3
10175 +#define DPSW_SCHED_MODE_SHIFT 0
10176 +#define DPSW_SCHED_MODE_SIZE 4
10177 +
10178 +struct dpsw_cmd_if_set_tx_selection {
10179 + __le16 if_id;
10180 + /* from LSB: priority_selector:3 */
10181 + u8 priority_selector;
10182 + u8 pad[5];
10183 + u8 tc_id[8];
10184 +
10185 + struct dpsw_tc_sched {
10186 + __le16 delta_bandwidth;
10187 + u8 mode;
10188 + u8 pad;
10189 + } tc_sched[8];
10190 +};
10191 +
10192 +#define DPSW_FILTER_SHIFT 0
10193 +#define DPSW_FILTER_SIZE 2
10194 +
10195 +struct dpsw_cmd_if_reflection {
10196 + __le16 if_id;
10197 + __le16 vlan_id;
10198 + /* only 2 bits from the LSB */
10199 + u8 filter;
10200 +};
10201 +
10202 +#define DPSW_MODE_SHIFT 0
10203 +#define DPSW_MODE_SIZE 4
10204 +#define DPSW_UNITS_SHIFT 4
10205 +#define DPSW_UNITS_SIZE 4
10206 +
10207 +struct dpsw_cmd_if_set_flooding_metering {
10208 + /* cmd word 0 */
10209 + __le16 if_id;
10210 + u8 pad;
10211 + /* from LSB: mode:4 units:4 */
10212 + u8 mode_units;
10213 + __le32 cir;
10214 + /* cmd word 1 */
10215 + __le32 eir;
10216 + __le32 cbs;
10217 + /* cmd word 2 */
10218 + __le32 ebs;
10219 +};
10220 +
10221 +struct dpsw_cmd_if_set_metering {
10222 + /* cmd word 0 */
10223 + __le16 if_id;
10224 + u8 tc_id;
10225 + /* from LSB: mode:4 units:4 */
10226 + u8 mode_units;
10227 + __le32 cir;
10228 + /* cmd word 1 */
10229 + __le32 eir;
10230 + __le32 cbs;
10231 + /* cmd word 2 */
10232 + __le32 ebs;
10233 +};
10234 +
10235 +#define DPSW_EARLY_DROP_MODE_SHIFT 0
10236 +#define DPSW_EARLY_DROP_MODE_SIZE 2
10237 +#define DPSW_EARLY_DROP_UNIT_SHIFT 2
10238 +#define DPSW_EARLY_DROP_UNIT_SIZE 2
10239 +
10240 +struct dpsw_prep_early_drop {
10241 + /* from LSB: mode:2 units:2 */
10242 + u8 conf;
10243 + u8 pad0[3];
10244 + __le32 tail_drop_threshold;
10245 + u8 green_drop_probability;
10246 + u8 pad1[7];
10247 + __le64 green_max_threshold;
10248 + __le64 green_min_threshold;
10249 + __le64 pad2;
10250 + u8 yellow_drop_probability;
10251 + u8 pad3[7];
10252 + __le64 yellow_max_threshold;
10253 + __le64 yellow_min_threshold;
10254 +};
10255 +
10256 +struct dpsw_cmd_if_set_early_drop {
10257 + /* cmd word 0 */
10258 + u8 pad0;
10259 + u8 tc_id;
10260 + __le16 if_id;
10261 + __le32 pad1;
10262 + /* cmd word 1 */
10263 + __le64 early_drop_iova;
10264 +};
10265 +
10266 +struct dpsw_cmd_custom_tpid {
10267 + __le16 pad;
10268 + __le16 tpid;
10269 +};
10270 +
10271 +struct dpsw_cmd_if {
10272 + __le16 if_id;
10273 +};
10274 +
10275 +#define DPSW_ADMIT_UNTAGGED_SHIFT 0
10276 +#define DPSW_ADMIT_UNTAGGED_SIZE 4
10277 +#define DPSW_ENABLED_SHIFT 5
10278 +#define DPSW_ENABLED_SIZE 1
10279 +#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
10280 +#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
10281 +
10282 +struct dpsw_rsp_if_get_attr {
10283 + /* cmd word 0 */
10284 + /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
10285 + u8 conf;
10286 + u8 pad1;
10287 + u8 num_tcs;
10288 + u8 pad2;
10289 + __le16 qdid;
10290 + /* cmd word 1 */
10291 + __le32 options;
10292 + __le32 pad3;
10293 + /* cmd word 2 */
10294 + __le32 rate;
10295 +};
10296 +
10297 +struct dpsw_cmd_if_set_max_frame_length {
10298 + __le16 if_id;
10299 + __le16 frame_length;
10300 +};
10301 +
10302 +struct dpsw_cmd_if_get_max_frame_length {
10303 + __le16 if_id;
10304 +};
10305 +
10306 +struct dpsw_rsp_if_get_max_frame_length {
10307 + __le16 pad;
10308 + __le16 frame_length;
10309 +};
10310 +
10311 +struct dpsw_cmd_if_set_link_cfg {
10312 + /* cmd word 0 */
10313 + __le16 if_id;
10314 + u8 pad[6];
10315 + /* cmd word 1 */
10316 + __le32 rate;
10317 + __le32 pad1;
10318 + /* cmd word 2 */
10319 + __le64 options;
10320 +};
10321 +
10322 +struct dpsw_cmd_if_get_link_state {
10323 + __le16 if_id;
10324 +};
10325 +
10326 +#define DPSW_UP_SHIFT 0
10327 +#define DPSW_UP_SIZE 1
10328 +
10329 +struct dpsw_rsp_if_get_link_state {
10330 + /* cmd word 0 */
10331 + __le32 pad0;
10332 + u8 up;
10333 + u8 pad1[3];
10334 + /* cmd word 1 */
10335 + __le32 rate;
10336 + __le32 pad2;
10337 + /* cmd word 2 */
10338 + __le64 options;
10339 +};
10340 +
10341 +struct dpsw_vlan_add {
10342 + __le16 fdb_id;
10343 + __le16 vlan_id;
10344 +};
10345 +
10346 +struct dpsw_cmd_vlan_manage_if {
10347 + /* cmd word 0 */
10348 + __le16 pad0;
10349 + __le16 vlan_id;
10350 + __le32 pad1;
10351 + /* cmd word 1 */
10352 + __le64 if_id[4];
10353 +};
10354 +
10355 +struct dpsw_cmd_vlan_remove {
10356 + __le16 pad;
10357 + __le16 vlan_id;
10358 +};
10359 +
10360 +struct dpsw_cmd_vlan_get_attr {
10361 + __le16 vlan_id;
10362 +};
10363 +
10364 +struct dpsw_rsp_vlan_get_attr {
10365 + /* cmd word 0 */
10366 + __le64 pad;
10367 + /* cmd word 1 */
10368 + __le16 fdb_id;
10369 + __le16 num_ifs;
10370 + __le16 num_untagged_ifs;
10371 + __le16 num_flooding_ifs;
10372 +};
10373 +
10374 +struct dpsw_cmd_vlan_get_if {
10375 + __le16 vlan_id;
10376 +};
10377 +
10378 +struct dpsw_rsp_vlan_get_if {
10379 + /* cmd word 0 */
10380 + __le16 pad0;
10381 + __le16 num_ifs;
10382 + u8 pad1[4];
10383 + /* cmd word 1 */
10384 + __le64 if_id[4];
10385 +};
10386 +
10387 +struct dpsw_cmd_vlan_get_if_untagged {
10388 + __le16 vlan_id;
10389 +};
10390 +
10391 +struct dpsw_rsp_vlan_get_if_untagged {
10392 + /* cmd word 0 */
10393 + __le16 pad0;
10394 + __le16 num_ifs;
10395 + u8 pad1[4];
10396 + /* cmd word 1 */
10397 + __le64 if_id[4];
10398 +};
10399 +
10400 +struct dpsw_cmd_vlan_get_if_flooding {
10401 + __le16 vlan_id;
10402 +};
10403 +
10404 +struct dpsw_rsp_vlan_get_if_flooding {
10405 + /* cmd word 0 */
10406 + __le16 pad0;
10407 + __le16 num_ifs;
10408 + u8 pad1[4];
10409 + /* cmd word 1 */
10410 + __le64 if_id[4];
10411 +};
10412 +
10413 +struct dpsw_cmd_fdb_add {
10414 + __le32 pad;
10415 + __le16 fdb_aging_time;
10416 + __le16 num_fdb_entries;
10417 +};
10418 +
10419 +struct dpsw_rsp_fdb_add {
10420 + __le16 fdb_id;
10421 +};
10422 +
10423 +struct dpsw_cmd_fdb_remove {
10424 + __le16 fdb_id;
10425 +};
10426 +
10427 +#define DPSW_ENTRY_TYPE_SHIFT 0
10428 +#define DPSW_ENTRY_TYPE_SIZE 4
10429 +
10430 +struct dpsw_cmd_fdb_add_unicast {
10431 + /* cmd word 0 */
10432 + __le16 fdb_id;
10433 + u8 mac_addr[6];
10434 + /* cmd word 1 */
10435 + u8 if_egress;
10436 + u8 pad;
10437 + /* only the first 4 bits from LSB */
10438 + u8 type;
10439 +};
10440 +
10441 +struct dpsw_cmd_fdb_get_unicast {
10442 + __le16 fdb_id;
10443 + u8 mac_addr[6];
10444 +};
10445 +
10446 +struct dpsw_rsp_fdb_get_unicast {
10447 + __le64 pad;
10448 + __le16 if_egress;
10449 + /* only first 4 bits from LSB */
10450 + u8 type;
10451 +};
10452 +
10453 +struct dpsw_cmd_fdb_remove_unicast {
10454 + /* cmd word 0 */
10455 + __le16 fdb_id;
10456 + u8 mac_addr[6];
10457 + /* cmd word 1 */
10458 + __le16 if_egress;
10459 + /* only the first 4 bits from LSB */
10460 + u8 type;
10461 +};
10462 +
10463 +struct dpsw_cmd_fdb_add_multicast {
10464 + /* cmd word 0 */
10465 + __le16 fdb_id;
10466 + __le16 num_ifs;
10467 + /* only the first 4 bits from LSB */
10468 + u8 type;
10469 + u8 pad[3];
10470 + /* cmd word 1 */
10471 + u8 mac_addr[6];
10472 + __le16 pad2;
10473 + /* cmd word 2 */
10474 + __le64 if_id[4];
10475 +};
10476 +
10477 +struct dpsw_cmd_fdb_get_multicast {
10478 + __le16 fdb_id;
10479 + u8 mac_addr[6];
10480 +};
10481 +
10482 +struct dpsw_rsp_fdb_get_multicast {
10483 + /* cmd word 0 */
10484 + __le64 pad0;
10485 + /* cmd word 1 */
10486 + __le16 num_ifs;
10487 + /* only the first 4 bits from LSB */
10488 + u8 type;
10489 + u8 pad1[5];
10490 + /* cmd word 2 */
10491 + __le64 if_id[4];
10492 +};
10493 +
10494 +struct dpsw_cmd_fdb_remove_multicast {
10495 + /* cmd word 0 */
10496 + __le16 fdb_id;
10497 + __le16 num_ifs;
10498 + /* only the first 4 bits from LSB */
10499 + u8 type;
10500 + u8 pad[3];
10501 + /* cmd word 1 */
10502 + u8 mac_addr[6];
10503 + __le16 pad2;
10504 + /* cmd word 2 */
10505 + __le64 if_id[4];
10506 +};
10507 +
10508 +#define DPSW_LEARNING_MODE_SHIFT 0
10509 +#define DPSW_LEARNING_MODE_SIZE 4
10510 +
10511 +struct dpsw_cmd_fdb_set_learning_mode {
10512 + __le16 fdb_id;
10513 + /* only the first 4 bits from LSB */
10514 + u8 mode;
10515 +};
10516 +
10517 +struct dpsw_cmd_fdb_get_attr {
10518 + __le16 fdb_id;
10519 +};
10520 +
10521 +struct dpsw_rsp_fdb_get_attr {
10522 + /* cmd word 0 */
10523 + __le16 pad;
10524 + __le16 max_fdb_entries;
10525 + __le16 fdb_aging_time;
10526 + __le16 num_fdb_mc_groups;
10527 + /* cmd word 1 */
10528 + __le16 max_fdb_mc_groups;
10529 + /* only the first 4 bits from LSB */
10530 + u8 learning_mode;
10531 +};
10532 +
10533 +struct dpsw_cmd_acl_add {
10534 + __le16 pad;
10535 + __le16 max_entries;
10536 +};
10537 +
10538 +struct dpsw_rsp_acl_add {
10539 + __le16 acl_id;
10540 +};
10541 +
10542 +struct dpsw_cmd_acl_remove {
10543 + __le16 acl_id;
10544 +};
10545 +
10546 +struct dpsw_prep_acl_entry {
10547 + u8 match_l2_dest_mac[6];
10548 + __le16 match_l2_tpid;
10549 +
10550 + u8 match_l2_source_mac[6];
10551 + __le16 match_l2_vlan_id;
10552 +
10553 + __le32 match_l3_dest_ip;
10554 + __le32 match_l3_source_ip;
10555 +
10556 + __le16 match_l4_dest_port;
10557 + __le16 match_l4_source_port;
10558 + __le16 match_l2_ether_type;
10559 + u8 match_l2_pcp_dei;
10560 + u8 match_l3_dscp;
10561 +
10562 + u8 mask_l2_dest_mac[6];
10563 + __le16 mask_l2_tpid;
10564 +
10565 + u8 mask_l2_source_mac[6];
10566 + __le16 mask_l2_vlan_id;
10567 +
10568 + __le32 mask_l3_dest_ip;
10569 + __le32 mask_l3_source_ip;
10570 +
10571 + __le16 mask_l4_dest_port;
10572 + __le16 mask_l4_source_port;
10573 + __le16 mask_l2_ether_type;
10574 + u8 mask_l2_pcp_dei;
10575 + u8 mask_l3_dscp;
10576 +
10577 + u8 match_l3_protocol;
10578 + u8 mask_l3_protocol;
10579 +};
10580 +
10581 +#define DPSW_RESULT_ACTION_SHIFT 0
10582 +#define DPSW_RESULT_ACTION_SIZE 4
10583 +
10584 +struct dpsw_cmd_acl_entry {
10585 + __le16 acl_id;
10586 + __le16 result_if_id;
10587 + __le32 precedence;
10588 + /* from LSB only the first 4 bits */
10589 + u8 result_action;
10590 + u8 pad[7];
10591 + __le64 pad2[4];
10592 + __le64 key_iova;
10593 +};
10594 +
10595 +struct dpsw_cmd_acl_if {
10596 + /* cmd word 0 */
10597 + __le16 acl_id;
10598 + __le16 num_ifs;
10599 + __le32 pad;
10600 + /* cmd word 1 */
10601 + __le64 if_id[4];
10602 +};
10603 +
10604 +struct dpsw_cmd_acl_get_attr {
10605 + __le16 acl_id;
10606 +};
10607 +
10608 +struct dpsw_rsp_acl_get_attr {
10609 + /* cmd word 0 */
10610 + __le64 pad;
10611 + /* cmd word 1 */
10612 + __le16 max_entries;
10613 + __le16 num_entries;
10614 + __le16 num_ifs;
10615 +};
10616 +
10617 +struct dpsw_rsp_ctrl_if_get_attr {
10618 + /* cmd word 0 */
10619 + __le64 pad;
10620 + /* cmd word 1 */
10621 + __le32 rx_fqid;
10622 + __le32 rx_err_fqid;
10623 + /* cmd word 2 */
10624 + __le32 tx_err_conf_fqid;
10625 +};
10626 +
10627 +struct dpsw_cmd_ctrl_if_set_pools {
10628 + u8 num_dpbp;
10629 + /* from LSB: POOL0_BACKUP_POOL:1 ... POOL7_BACKUP_POOL */
10630 + u8 backup_pool;
10631 + __le16 pad;
10632 + __le32 dpbp_id[8];
10633 + __le16 buffer_size[8];
10634 +};
10635 +
10636 +struct dpsw_rsp_get_api_version {
10637 + __le16 version_major;
10638 + __le16 version_minor;
10639 +};
10640 +
10641 +#endif /* __FSL_DPSW_CMD_H */
10642 diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
10643 new file mode 100644
10644 index 00000000..179e98c8
10645 --- /dev/null
10646 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
10647 @@ -0,0 +1,2762 @@
10648 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
10649 + *
10650 + * Redistribution and use in source and binary forms, with or without
10651 + * modification, are permitted provided that the following conditions are met:
10652 + * * Redistributions of source code must retain the above copyright
10653 + * notice, this list of conditions and the following disclaimer.
10654 + * * Redistributions in binary form must reproduce the above copyright
10655 + * notice, this list of conditions and the following disclaimer in the
10656 + * documentation and/or other materials provided with the distribution.
10657 + * * Neither the name of the above-listed copyright holders nor the
10658 + * names of any contributors may be used to endorse or promote products
10659 + * derived from this software without specific prior written permission.
10660 + *
10661 + *
10662 + * ALTERNATIVELY, this software may be distributed under the terms of the
10663 + * GNU General Public License ("GPL") as published by the Free Software
10664 + * Foundation, either version 2 of that License or (at your option) any
10665 + * later version.
10666 + *
10667 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10668 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10669 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10670 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10671 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10672 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10673 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10674 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10675 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10676 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10677 + * POSSIBILITY OF SUCH DAMAGE.
10678 + */
10679 +#include "../../fsl-mc/include/mc-sys.h"
10680 +#include "../../fsl-mc/include/mc-cmd.h"
10681 +#include "dpsw.h"
10682 +#include "dpsw-cmd.h"
10683 +
10684 +static void build_if_id_bitmap(__le64 *bmap,
10685 + const u16 *id,
10686 + const u16 num_ifs) {
10687 + int i;
10688 +
10689 + for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
10690 + bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64],
10691 + (id[i] % 64),
10692 + 1);
10693 +}
10694 +
10695 +static void read_if_id_bitmap(u16 *if_id,
10696 + u16 *num_ifs,
10697 + __le64 *bmap) {
10698 + int bitmap[DPSW_MAX_IF] = { 0 };
10699 + int i, j = 0;
10700 + int count = 0;
10701 +
10702 + for (i = 0; i < DPSW_MAX_IF; i++) {
10703 + bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]),
10704 + i % 64);
10705 + count += bitmap[i];
10706 + }
10707 +
10708 + *num_ifs = (u16)count;
10709 +
10710 + for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
10711 + if (bitmap[i]) {
10712 + if_id[j] = (u16)i;
10713 + j++;
10714 + }
10715 + }
10716 +}
10717 +
10718 +/**
10719 + * dpsw_open() - Open a control session for the specified object
10720 + * @mc_io: Pointer to MC portal's I/O object
10721 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10722 + * @dpsw_id: DPSW unique ID
10723 + * @token: Returned token; use in subsequent API calls
10724 + *
10725 + * This function can be used to open a control session for an
10726 + * already created object; an object may have been declared in
10727 + * the DPL or by calling the dpsw_create() function.
10728 + * This function returns a unique authentication token,
10729 + * associated with the specific object ID and the specific MC
10730 + * portal; this token must be used in all subsequent commands for
10731 + * this specific object
10732 + *
10733 + * Return: '0' on Success; Error code otherwise.
10734 + */
10735 +int dpsw_open(struct fsl_mc_io *mc_io,
10736 + u32 cmd_flags,
10737 + int dpsw_id,
10738 + u16 *token)
10739 +{
10740 + struct mc_command cmd = { 0 };
10741 + struct dpsw_cmd_open *cmd_params;
10742 + int err;
10743 +
10744 + /* prepare command */
10745 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
10746 + cmd_flags,
10747 + 0);
10748 + cmd_params = (struct dpsw_cmd_open *)cmd.params;
10749 + cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
10750 +
10751 + /* send command to mc*/
10752 + err = mc_send_command(mc_io, &cmd);
10753 + if (err)
10754 + return err;
10755 +
10756 + /* retrieve response parameters */
10757 + *token = mc_cmd_hdr_read_token(&cmd);
10758 +
10759 + return 0;
10760 +}
10761 +
10762 +/**
10763 + * dpsw_close() - Close the control session of the object
10764 + * @mc_io: Pointer to MC portal's I/O object
10765 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10766 + * @token: Token of DPSW object
10767 + *
10768 + * After this function is called, no further operations are
10769 + * allowed on the object without opening a new control session.
10770 + *
10771 + * Return: '0' on Success; Error code otherwise.
10772 + */
10773 +int dpsw_close(struct fsl_mc_io *mc_io,
10774 + u32 cmd_flags,
10775 + u16 token)
10776 +{
10777 + struct mc_command cmd = { 0 };
10778 +
10779 + /* prepare command */
10780 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
10781 + cmd_flags,
10782 + token);
10783 +
10784 + /* send command to mc*/
10785 + return mc_send_command(mc_io, &cmd);
10786 +}
10787 +
10788 +/**
10789 + * dpsw_enable() - Enable DPSW functionality
10790 + * @mc_io: Pointer to MC portal's I/O object
10791 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10792 + * @token: Token of DPSW object
10793 + *
10794 + * Return: Completion status. '0' on Success; Error code otherwise.
10795 + */
10796 +int dpsw_enable(struct fsl_mc_io *mc_io,
10797 + u32 cmd_flags,
10798 + u16 token)
10799 +{
10800 + struct mc_command cmd = { 0 };
10801 +
10802 + /* prepare command */
10803 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
10804 + cmd_flags,
10805 + token);
10806 +
10807 + /* send command to mc*/
10808 + return mc_send_command(mc_io, &cmd);
10809 +}
10810 +
10811 +/**
10812 + * dpsw_disable() - Disable DPSW functionality
10813 + * @mc_io: Pointer to MC portal's I/O object
10814 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10815 + * @token: Token of DPSW object
10816 + *
10817 + * Return: Completion status. '0' on Success; Error code otherwise.
10818 + */
10819 +int dpsw_disable(struct fsl_mc_io *mc_io,
10820 + u32 cmd_flags,
10821 + u16 token)
10822 +{
10823 + struct mc_command cmd = { 0 };
10824 +
10825 + /* prepare command */
10826 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
10827 + cmd_flags,
10828 + token);
10829 +
10830 + /* send command to mc*/
10831 + return mc_send_command(mc_io, &cmd);
10832 +}
10833 +
10834 +/**
10835 + * dpsw_is_enabled() - Check if the DPSW is enabled
10836 + *
10837 + * @mc_io: Pointer to MC portal's I/O object
10838 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10839 + * @token: Token of DPSW object
10840 + * @en: Returns '1' if object is enabled; '0' otherwise
10841 + *
10842 + * Return: '0' on Success; Error code otherwise
10843 + */
10844 +int dpsw_is_enabled(struct fsl_mc_io *mc_io,
10845 + u32 cmd_flags,
10846 + u16 token,
10847 + int *en)
10848 +{
10849 + struct mc_command cmd = { 0 };
10850 + struct dpsw_rsp_is_enabled *cmd_rsp;
10851 + int err;
10852 +
10853 + /* prepare command */
10854 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
10855 + token);
10856 +
10857 + /* send command to mc*/
10858 + err = mc_send_command(mc_io, &cmd);
10859 + if (err)
10860 + return err;
10861 +
10862 + /* retrieve response parameters */
10863 + cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params;
10864 + *en = dpsw_get_field(cmd_rsp->enabled, ENABLE);
10865 +
10866 + return 0;
10867 +}
10868 +
10869 +/**
10870 + * dpsw_reset() - Reset the DPSW, returns the object to initial state.
10871 + * @mc_io: Pointer to MC portal's I/O object
10872 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10873 + * @token: Token of DPSW object
10874 + *
10875 + * Return: '0' on Success; Error code otherwise.
10876 + */
10877 +int dpsw_reset(struct fsl_mc_io *mc_io,
10878 + u32 cmd_flags,
10879 + u16 token)
10880 +{
10881 + struct mc_command cmd = { 0 };
10882 +
10883 + /* prepare command */
10884 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
10885 + cmd_flags,
10886 + token);
10887 +
10888 + /* send command to mc*/
10889 + return mc_send_command(mc_io, &cmd);
10890 +}
10891 +
10892 +/**
10893 + * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
10894 + * @mc_io: Pointer to MC portal's I/O object
10895 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10896 + * @token: Token of DPSW object
10897 + * @irq_index: Identifies the interrupt index to configure
10898 + * @irq_cfg: IRQ configuration
10899 + *
10900 + * Return: '0' on Success; Error code otherwise.
10901 + */
10902 +int dpsw_set_irq(struct fsl_mc_io *mc_io,
10903 + u32 cmd_flags,
10904 + u16 token,
10905 + u8 irq_index,
10906 + struct dpsw_irq_cfg *irq_cfg)
10907 +{
10908 + struct mc_command cmd = { 0 };
10909 + struct dpsw_cmd_set_irq *cmd_params;
10910 +
10911 + /* prepare command */
10912 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
10913 + cmd_flags,
10914 + token);
10915 + cmd_params = (struct dpsw_cmd_set_irq *)cmd.params;
10916 + cmd_params->irq_index = irq_index;
10917 + cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
10918 + cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
10919 + cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
10920 +
10921 + /* send command to mc*/
10922 + return mc_send_command(mc_io, &cmd);
10923 +}
10924 +
10925 +/**
10926 + * dpsw_get_irq() - Get IRQ information from the DPSW
10927 + *
10928 + * @mc_io: Pointer to MC portal's I/O object
10929 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10930 + * @token: Token of DPSW object
10931 + * @irq_index: The interrupt index to configure
10932 + * @type: Interrupt type: 0 represents message interrupt
10933 + * type (both irq_addr and irq_val are valid)
10934 + * @irq_cfg: IRQ attributes
10935 + *
10936 + * Return: '0' on Success; Error code otherwise.
10937 + */
10938 +int dpsw_get_irq(struct fsl_mc_io *mc_io,
10939 + u32 cmd_flags,
10940 + u16 token,
10941 + u8 irq_index,
10942 + int *type,
10943 + struct dpsw_irq_cfg *irq_cfg)
10944 +{
10945 + struct mc_command cmd = { 0 };
10946 + struct dpsw_cmd_get_irq *cmd_params;
10947 + struct dpsw_rsp_get_irq *rsp_params;
10948 + int err;
10949 +
10950 + /* prepare command */
10951 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
10952 + cmd_flags,
10953 + token);
10954 + cmd_params = (struct dpsw_cmd_get_irq *)cmd.params;
10955 + cmd_params->irq_index = irq_index;
10956 +
10957 + /* send command to mc*/
10958 + err = mc_send_command(mc_io, &cmd);
10959 + if (err)
10960 + return err;
10961 +
10962 + /* retrieve response parameters */
10963 + rsp_params = (struct dpsw_rsp_get_irq *)cmd.params;
10964 + irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
10965 + irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
10966 + irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
10967 + *type = le32_to_cpu(rsp_params->irq_type);
10968 +
10969 + return 0;
10970 +}
10971 +
10972 +/**
10973 + * dpsw_set_irq_enable() - Set overall interrupt state.
10974 + * @mc_io: Pointer to MC portal's I/O object
10975 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10976 + * @token: Token of DPCI object
10977 + * @irq_index: The interrupt index to configure
10978 + * @en: Interrupt state - enable = 1, disable = 0
10979 + *
10980 + * Allows GPP software to control when interrupts are generated.
10981 + * Each interrupt can have up to 32 causes. The enable/disable controls the
10982 + * overall interrupt state. If the interrupt is disabled, no causes will
10983 + * trigger an interrupt.
10984 + *
10985 + * Return: '0' on Success; Error code otherwise.
10986 + */
10987 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
10988 + u32 cmd_flags,
10989 + u16 token,
10990 + u8 irq_index,
10991 + u8 en)
10992 +{
10993 + struct mc_command cmd = { 0 };
10994 + struct dpsw_cmd_set_irq_enable *cmd_params;
10995 +
10996 + /* prepare command */
10997 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
10998 + cmd_flags,
10999 + token);
11000 + cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
11001 + dpsw_set_field(cmd_params->enable_state, ENABLE, en);
11002 + cmd_params->irq_index = irq_index;
11003 +
11004 + /* send command to mc*/
11005 + return mc_send_command(mc_io, &cmd);
11006 +}
11007 +
11008 +/**
11009 + * dpsw_set_irq_mask() - Set interrupt mask.
11010 + * @mc_io: Pointer to MC portal's I/O object
11011 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11012 + * @token: Token of DPCI object
11013 + * @irq_index: The interrupt index to configure
11014 + * @mask: Event mask to trigger interrupt;
11015 + * each bit:
11016 + * 0 = ignore event
11017 + * 1 = consider event for asserting IRQ
11018 + *
11019 + * Every interrupt can have up to 32 causes and the interrupt model supports
11020 + * masking/unmasking each cause independently
11021 + *
11022 + * Return: '0' on Success; Error code otherwise.
11023 + */
11024 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
11025 + u32 cmd_flags,
11026 + u16 token,
11027 + u8 irq_index,
11028 + u32 mask)
11029 +{
11030 + struct mc_command cmd = { 0 };
11031 + struct dpsw_cmd_set_irq_mask *cmd_params;
11032 +
11033 + /* prepare command */
11034 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
11035 + cmd_flags,
11036 + token);
11037 + cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
11038 + cmd_params->mask = cpu_to_le32(mask);
11039 + cmd_params->irq_index = irq_index;
11040 +
11041 + /* send command to mc*/
11042 + return mc_send_command(mc_io, &cmd);
11043 +}
11044 +
11045 +/**
11046 + * dpsw_get_irq_status() - Get the current status of any pending interrupts
11047 + * @mc_io: Pointer to MC portal's I/O object
11048 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11049 + * @token: Token of DPSW object
11050 + * @irq_index: The interrupt index to configure
11051 + * @status: Returned interrupts status - one bit per cause:
11052 + * 0 = no interrupt pending
11053 + * 1 = interrupt pending
11054 + *
11055 + * Return: '0' on Success; Error code otherwise.
11056 + */
11057 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
11058 + u32 cmd_flags,
11059 + u16 token,
11060 + u8 irq_index,
11061 + u32 *status)
11062 +{
11063 + struct mc_command cmd = { 0 };
11064 + struct dpsw_cmd_get_irq_status *cmd_params;
11065 + struct dpsw_rsp_get_irq_status *rsp_params;
11066 + int err;
11067 +
11068 + /* prepare command */
11069 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
11070 + cmd_flags,
11071 + token);
11072 + cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
11073 + cmd_params->status = cpu_to_le32(*status);
11074 + cmd_params->irq_index = irq_index;
11075 +
11076 + /* send command to mc*/
11077 + err = mc_send_command(mc_io, &cmd);
11078 + if (err)
11079 + return err;
11080 +
11081 + /* retrieve response parameters */
11082 + rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
11083 + *status = le32_to_cpu(rsp_params->status);
11084 +
11085 + return 0;
11086 +}
11087 +
11088 +/**
11089 + * dpsw_clear_irq_status() - Clear a pending interrupt's status
11090 + * @mc_io: Pointer to MC portal's I/O object
11091 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11092 + * @token: Token of DPCI object
11093 + * @irq_index: The interrupt index to configure
11094 + * @status: bits to clear (W1C) - one bit per cause:
11095 + * 0 = don't change
11096 + * 1 = clear status bit
11097 + *
11098 + * Return: '0' on Success; Error code otherwise.
11099 + */
11100 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
11101 + u32 cmd_flags,
11102 + u16 token,
11103 + u8 irq_index,
11104 + u32 status)
11105 +{
11106 + struct mc_command cmd = { 0 };
11107 + struct dpsw_cmd_clear_irq_status *cmd_params;
11108 +
11109 + /* prepare command */
11110 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
11111 + cmd_flags,
11112 + token);
11113 + cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
11114 + cmd_params->status = cpu_to_le32(status);
11115 + cmd_params->irq_index = irq_index;
11116 +
11117 + /* send command to mc*/
11118 + return mc_send_command(mc_io, &cmd);
11119 +}
11120 +
11121 +/**
11122 + * dpsw_get_attributes() - Retrieve DPSW attributes
11123 + * @mc_io: Pointer to MC portal's I/O object
11124 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11125 + * @token: Token of DPSW object
11126 + * @attr: Returned DPSW attributes
11127 + *
11128 + * Return: Completion status. '0' on Success; Error code otherwise.
11129 + */
11130 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
11131 + u32 cmd_flags,
11132 + u16 token,
11133 + struct dpsw_attr *attr)
11134 +{
11135 + struct mc_command cmd = { 0 };
11136 + struct dpsw_rsp_get_attr *rsp_params;
11137 + int err;
11138 +
11139 + /* prepare command */
11140 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
11141 + cmd_flags,
11142 + token);
11143 +
11144 + /* send command to mc*/
11145 + err = mc_send_command(mc_io, &cmd);
11146 + if (err)
11147 + return err;
11148 +
11149 + /* retrieve response parameters */
11150 + rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
11151 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
11152 + attr->max_fdbs = rsp_params->max_fdbs;
11153 + attr->num_fdbs = rsp_params->num_fdbs;
11154 + attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
11155 + attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
11156 + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
11157 + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
11158 + attr->id = le32_to_cpu(rsp_params->dpsw_id);
11159 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
11160 + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
11161 + attr->max_meters_per_if = rsp_params->max_meters_per_if;
11162 + attr->options = le64_to_cpu(rsp_params->options);
11163 + attr->component_type = dpsw_get_field(rsp_params->component_type,
11164 + COMPONENT_TYPE);
11165 +
11166 + return 0;
11167 +}
11168 +
11169 +/**
11170 + * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
11171 + * @mc_io: Pointer to MC portal's I/O object
11172 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11173 + * @token: Token of DPSW object
11174 + * @if_id: Interface Id
11175 + *
11176 + * Only one reflection receive interface is allowed per switch
11177 + *
11178 + * Return: Completion status. '0' on Success; Error code otherwise.
11179 + */
11180 +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
11181 + u32 cmd_flags,
11182 + u16 token,
11183 + u16 if_id)
11184 +{
11185 + struct mc_command cmd = { 0 };
11186 + struct dpsw_cmd_set_reflection_if *cmd_params;
11187 +
11188 + /* prepare command */
11189 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
11190 + cmd_flags,
11191 + token);
11192 + cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
11193 + cmd_params->if_id = cpu_to_le16(if_id);
11194 +
11195 + /* send command to mc*/
11196 + return mc_send_command(mc_io, &cmd);
11197 +}
11198 +
11199 +/**
11200 + * dpsw_if_set_link_cfg() - Set the link configuration.
11201 + * @mc_io: Pointer to MC portal's I/O object
11202 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11203 + * @token: Token of DPSW object
11204 + * @if_id: Interface id
11205 + * @cfg: Link configuration
11206 + *
11207 + * Return: '0' on Success; Error code otherwise.
11208 + */
11209 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
11210 + u32 cmd_flags,
11211 + u16 token,
11212 + u16 if_id,
11213 + struct dpsw_link_cfg *cfg)
11214 +{
11215 + struct mc_command cmd = { 0 };
11216 + struct dpsw_cmd_if_set_link_cfg *cmd_params;
11217 +
11218 + /* prepare command */
11219 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
11220 + cmd_flags,
11221 + token);
11222 + cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
11223 + cmd_params->if_id = cpu_to_le16(if_id);
11224 + cmd_params->rate = cpu_to_le32(cfg->rate);
11225 + cmd_params->options = cpu_to_le64(cfg->options);
11226 +
11227 + /* send command to mc*/
11228 + return mc_send_command(mc_io, &cmd);
11229 +}
11230 +
11231 +/**
11232 + * dpsw_if_get_link_state() - Return the link state
11233 + * @mc_io: Pointer to MC portal's I/O object
11234 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11235 + * @token: Token of DPSW object
11236 + * @if_id: Interface id
11237 + * @state:	Link state; 1 - link up, 0 - link down or disconnected
11238 + *
11239 + * Return:	'0' on Success; Error code otherwise.
11240 + */
11241 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
11242 + u32 cmd_flags,
11243 + u16 token,
11244 + u16 if_id,
11245 + struct dpsw_link_state *state)
11246 +{
11247 + struct mc_command cmd = { 0 };
11248 + struct dpsw_cmd_if_get_link_state *cmd_params;
11249 + struct dpsw_rsp_if_get_link_state *rsp_params;
11250 + int err;
11251 +
11252 + /* prepare command */
11253 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
11254 + cmd_flags,
11255 + token);
11256 + cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
11257 + cmd_params->if_id = cpu_to_le16(if_id);
11258 +
11259 + /* send command to mc*/
11260 + err = mc_send_command(mc_io, &cmd);
11261 + if (err)
11262 + return err;
11263 +
11264 + /* retrieve response parameters */
11265 + rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
11266 + state->rate = le32_to_cpu(rsp_params->rate);
11267 + state->options = le64_to_cpu(rsp_params->options);
11268 + state->up = dpsw_get_field(rsp_params->up, UP);
11269 +
11270 + return 0;
11271 +}
11272 +
11273 +/**
11274 + * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface
11275 + * @mc_io: Pointer to MC portal's I/O object
11276 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11277 + * @token: Token of DPSW object
11278 + * @if_id: Interface Identifier
11279 + * @en: 1 - enable, 0 - disable
11280 + *
11281 + * Return: Completion status. '0' on Success; Error code otherwise.
11282 + */
11283 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
11284 + u32 cmd_flags,
11285 + u16 token,
11286 + u16 if_id,
11287 + int en)
11288 +{
11289 + struct mc_command cmd = { 0 };
11290 + struct dpsw_cmd_if_set_flooding *cmd_params;
11291 +
11292 + /* prepare command */
11293 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
11294 + cmd_flags,
11295 + token);
11296 + cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
11297 + cmd_params->if_id = cpu_to_le16(if_id);
11298 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11299 +
11300 + /* send command to mc*/
11301 + return mc_send_command(mc_io, &cmd);
11302 +}
11303 +
11304 +/**
11305 + * dpsw_if_set_broadcast() - Enable/disable broadcast for a particular interface
11306 + * @mc_io: Pointer to MC portal's I/O object
11307 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11308 + * @token: Token of DPSW object
11309 + * @if_id: Interface Identifier
11310 + * @en: 1 - enable, 0 - disable
11311 + *
11312 + * Return: Completion status. '0' on Success; Error code otherwise.
11313 + */
11314 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
11315 + u32 cmd_flags,
11316 + u16 token,
11317 + u16 if_id,
11318 + int en)
11319 +{
11320 + struct mc_command cmd = { 0 };
11321 + struct dpsw_cmd_if_set_broadcast *cmd_params;
11322 +
11323 + /* prepare command */
11324 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
11325 + cmd_flags,
11326 + token);
11327 + cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
11328 + cmd_params->if_id = cpu_to_le16(if_id);
11329 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11330 +
11331 + /* send command to mc*/
11332 + return mc_send_command(mc_io, &cmd);
11333 +}
11334 +
11335 +/**
11336 + * dpsw_if_set_multicast() - Enable/disable multicast for a particular interface
11337 + * @mc_io: Pointer to MC portal's I/O object
11338 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11339 + * @token: Token of DPSW object
11340 + * @if_id: Interface Identifier
11341 + * @en: 1 - enable, 0 - disable
11342 + *
11343 + * Return: Completion status. '0' on Success; Error code otherwise.
11344 + */
11345 +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
11346 + u32 cmd_flags,
11347 + u16 token,
11348 + u16 if_id,
11349 + int en)
11350 +{
11351 + struct mc_command cmd = { 0 };
11352 + struct dpsw_cmd_if_set_multicast *cmd_params;
11353 +
11354 + /* prepare command */
11355 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
11356 + cmd_flags,
11357 + token);
11358 + cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params;
11359 + cmd_params->if_id = cpu_to_le16(if_id);
11360 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11361 +
11362 + /* send command to mc*/
11363 + return mc_send_command(mc_io, &cmd);
11364 +}
11365 +
11366 +/**
11367 + * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
11368 + * @mc_io: Pointer to MC portal's I/O object
11369 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11370 + * @token: Token of DPSW object
11371 + * @if_id: Interface Identifier
11372 + * @cfg: Tag Control Information Configuration
11373 + *
11374 + * Return: Completion status. '0' on Success; Error code otherwise.
11375 + */
11376 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
11377 + u32 cmd_flags,
11378 + u16 token,
11379 + u16 if_id,
11380 + const struct dpsw_tci_cfg *cfg)
11381 +{
11382 + struct mc_command cmd = { 0 };
11383 + struct dpsw_cmd_if_set_tci *cmd_params;
11384 +
11385 + /* prepare command */
11386 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
11387 + cmd_flags,
11388 + token);
11389 + cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
11390 + cmd_params->if_id = cpu_to_le16(if_id);
11391 + dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id);
11392 + dpsw_set_field(cmd_params->conf, DEI, cfg->dei);
11393 + dpsw_set_field(cmd_params->conf, PCP, cfg->pcp);
11394 + cmd_params->conf = cpu_to_le16(cmd_params->conf);
11395 +
11396 + /* send command to mc*/
11397 + return mc_send_command(mc_io, &cmd);
11398 +}
11399 +
11400 +/**
11401 + * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
11402 + * @mc_io: Pointer to MC portal's I/O object
11403 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11404 + * @token: Token of DPSW object
11405 + * @if_id: Interface Identifier
11406 + * @cfg: Tag Control Information Configuration
11407 + *
11408 + * Return: Completion status. '0' on Success; Error code otherwise.
11409 + */
11410 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
11411 + u32 cmd_flags,
11412 + u16 token,
11413 + u16 if_id,
11414 + struct dpsw_tci_cfg *cfg)
11415 +{
11416 + struct mc_command cmd = { 0 };
11417 + struct dpsw_cmd_if_get_tci *cmd_params;
11418 + struct dpsw_rsp_if_get_tci *rsp_params;
11419 + int err;
11420 +
11421 + /* prepare command */
11422 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
11423 + cmd_flags,
11424 + token);
11425 + cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
11426 + cmd_params->if_id = cpu_to_le16(if_id);
11427 +
11428 + /* send command to mc*/
11429 + err = mc_send_command(mc_io, &cmd);
11430 + if (err)
11431 + return err;
11432 +
11433 + /* retrieve response parameters */
11434 + rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
11435 + cfg->pcp = rsp_params->pcp;
11436 + cfg->dei = rsp_params->dei;
11437 + cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
11438 +
11439 + return 0;
11440 +}
11441 +
11442 +/**
11443 + * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
11444 + * @mc_io: Pointer to MC portal's I/O object
11445 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11446 + * @token: Token of DPSW object
11447 + * @if_id: Interface Identifier
11448 + * @cfg: STP State configuration parameters
11449 + *
11450 + * The following STP states are supported -
11451 + * blocking, listening, learning, forwarding and disabled.
11452 + *
11453 + * Return: Completion status. '0' on Success; Error code otherwise.
11454 + */
11455 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
11456 + u32 cmd_flags,
11457 + u16 token,
11458 + u16 if_id,
11459 + const struct dpsw_stp_cfg *cfg)
11460 +{
11461 + struct mc_command cmd = { 0 };
11462 + struct dpsw_cmd_if_set_stp *cmd_params;
11463 +
11464 + /* prepare command */
11465 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
11466 + cmd_flags,
11467 + token);
11468 + cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
11469 + cmd_params->if_id = cpu_to_le16(if_id);
11470 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
11471 + dpsw_set_field(cmd_params->state, STATE, cfg->state);
11472 +
11473 + /* send command to mc*/
11474 + return mc_send_command(mc_io, &cmd);
11475 +}
11476 +
11477 +/**
11478 + * dpsw_if_set_accepted_frames()
11479 + * @mc_io: Pointer to MC portal's I/O object
11480 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11481 + * @token: Token of DPSW object
11482 + * @if_id: Interface Identifier
11483 + * @cfg: Frame types configuration
11484 + *
11485 + * When admit_only_vlan_tagged - the device will discard untagged
11486 + * frames or Priority-Tagged frames received on this interface.
11487 + * When admit_only_untagged - untagged frames or Priority-Tagged
11488 + * frames received on this interface will be accepted and assigned
11489 + * to a VID based on the PVID and VID Set for this interface.
11490 + * When admit_all - the device will accept VLAN tagged, untagged
11491 + * and priority tagged frames.
11492 + * The default is admit_all.
11493 + *
11494 + * Return: Completion status. '0' on Success; Error code otherwise.
11495 + */
11496 +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
11497 + u32 cmd_flags,
11498 + u16 token,
11499 + u16 if_id,
11500 + const struct dpsw_accepted_frames_cfg *cfg)
11501 +{
11502 + struct mc_command cmd = { 0 };
11503 + struct dpsw_cmd_if_set_accepted_frames *cmd_params;
11504 +
11505 + /* prepare command */
11506 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
11507 + cmd_flags,
11508 + token);
11509 + cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params;
11510 + cmd_params->if_id = cpu_to_le16(if_id);
11511 + dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type);
11512 + dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT,
11513 + cfg->unaccept_act);
11514 +
11515 + /* send command to mc*/
11516 + return mc_send_command(mc_io, &cmd);
11517 +}
11518 +
11519 +/**
11520 + * dpsw_if_set_accept_all_vlan()
11521 + * @mc_io: Pointer to MC portal's I/O object
11522 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11523 + * @token: Token of DPSW object
11524 + * @if_id: Interface Identifier
11525 + * @accept_all: Accept or drop frames having different VLAN
11526 + *
11527 + * When accept_all is FALSE, the device will discard incoming
11528 + * frames for VLANs that do not include this interface in their
11529 + * member set. When accept_all is TRUE, the interface will accept all incoming frames.
11530 + *
11531 + * Return: Completion status. '0' on Success; Error code otherwise.
11532 + */
11533 +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
11534 + u32 cmd_flags,
11535 + u16 token,
11536 + u16 if_id,
11537 + int accept_all)
11538 +{
11539 + struct mc_command cmd = { 0 };
11540 + struct dpsw_cmd_if_set_accept_all_vlan *cmd_params;
11541 +
11542 + /* prepare command */
11543 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
11544 + cmd_flags,
11545 + token);
11546 + cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params;
11547 + cmd_params->if_id = cpu_to_le16(if_id);
11548 + dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all);
11549 +
11550 + /* send command to mc*/
11551 + return mc_send_command(mc_io, &cmd);
11552 +}
11553 +
11554 +/**
11555 + * dpsw_if_get_counter() - Get a specific counter of a particular interface
11556 + * @mc_io: Pointer to MC portal's I/O object
11557 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11558 + * @token: Token of DPSW object
11559 + * @if_id: Interface Identifier
11560 + * @type: Counter type
11561 + * @counter: return value
11562 + *
11563 + * Return: Completion status. '0' on Success; Error code otherwise.
11564 + */
11565 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
11566 + u32 cmd_flags,
11567 + u16 token,
11568 + u16 if_id,
11569 + enum dpsw_counter type,
11570 + u64 *counter)
11571 +{
11572 + struct mc_command cmd = { 0 };
11573 + struct dpsw_cmd_if_get_counter *cmd_params;
11574 + struct dpsw_rsp_if_get_counter *rsp_params;
11575 + int err;
11576 +
11577 + /* prepare command */
11578 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
11579 + cmd_flags,
11580 + token);
11581 + cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
11582 + cmd_params->if_id = cpu_to_le16(if_id);
11583 + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
11584 +
11585 + /* send command to mc*/
11586 + err = mc_send_command(mc_io, &cmd);
11587 + if (err)
11588 + return err;
11589 +
11590 + /* retrieve response parameters */
11591 + rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
11592 + *counter = le64_to_cpu(rsp_params->counter);
11593 +
11594 + return 0;
11595 +}
11596 +
11597 +/**
11598 + * dpsw_if_set_counter() - Set a specific counter of a particular interface
11599 + * @mc_io: Pointer to MC portal's I/O object
11600 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11601 + * @token: Token of DPSW object
11602 + * @if_id: Interface Identifier
11603 + * @type: Counter type
11604 + * @counter: New counter value
11605 + *
11606 + * Return: Completion status. '0' on Success; Error code otherwise.
11607 + */
11608 +int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
11609 + u32 cmd_flags,
11610 + u16 token,
11611 + u16 if_id,
11612 + enum dpsw_counter type,
11613 + u64 counter)
11614 +{
11615 + struct mc_command cmd = { 0 };
11616 + struct dpsw_cmd_if_set_counter *cmd_params;
11617 +
11618 + /* prepare command */
11619 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
11620 + cmd_flags,
11621 + token);
11622 + cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params;
11623 + cmd_params->if_id = cpu_to_le16(if_id);
11624 + cmd_params->counter = cpu_to_le64(counter);
11625 + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
11626 +
11627 + /* send command to mc*/
11628 + return mc_send_command(mc_io, &cmd);
11629 +}
11630 +
11631 +/**
11632 + * dpsw_if_set_tx_selection() - Function is used for mapping a variety
11633 + * of frame fields
11634 + * @mc_io: Pointer to MC portal's I/O object
11635 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11636 + * @token: Token of DPSW object
11637 + * @if_id: Interface Identifier
11638 + * @cfg: Traffic class mapping configuration
11639 + *
11640 + * Function is used for mapping a variety of frame fields (DSCP, PCP)
11641 + * to a traffic class. A traffic class is a number
11642 + * in the range from 0 to 7.
11643 + *
11644 + * Return: Completion status. '0' on Success; Error code otherwise.
11645 + */
11646 +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
11647 + u32 cmd_flags,
11648 + u16 token,
11649 + u16 if_id,
11650 + const struct dpsw_tx_selection_cfg *cfg)
11651 +{
11652 + struct dpsw_cmd_if_set_tx_selection *cmd_params;
11653 + struct mc_command cmd = { 0 };
11654 + int i;
11655 +
11656 + /* prepare command */
11657 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
11658 + cmd_flags,
11659 + token);
11660 + cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params;
11661 + cmd_params->if_id = cpu_to_le16(if_id);
11662 + dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR,
11663 + cfg->priority_selector);
11664 +
11665 + for (i = 0; i < 8; i++) {
11666 + cmd_params->tc_sched[i].delta_bandwidth =
11667 + cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
11668 + dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE,
11669 + cfg->tc_sched[i].mode);
11670 + cmd_params->tc_id[i] = cfg->tc_id[i];
11671 + }
11672 +
11673 + /* send command to mc*/
11674 + return mc_send_command(mc_io, &cmd);
11675 +}
11676 +
11677 +/**
11678 + * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
11679 + * @mc_io: Pointer to MC portal's I/O object
11680 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11681 + * @token: Token of DPSW object
11682 + * @if_id: Interface Identifier
11683 + * @cfg: Reflection configuration
11684 + *
11685 + * Return: Completion status. '0' on Success; Error code otherwise.
11686 + */
11687 +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
11688 + u32 cmd_flags,
11689 + u16 token,
11690 + u16 if_id,
11691 + const struct dpsw_reflection_cfg *cfg)
11692 +{
11693 + struct mc_command cmd = { 0 };
11694 + struct dpsw_cmd_if_reflection *cmd_params;
11695 +
11696 + /* prepare command */
11697 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
11698 + cmd_flags,
11699 + token);
11700 + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
11701 + cmd_params->if_id = cpu_to_le16(if_id);
11702 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
11703 + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
11704 +
11705 + /* send command to mc*/
11706 + return mc_send_command(mc_io, &cmd);
11707 +}
11708 +
11709 +/**
11710 + * dpsw_if_remove_reflection() - Remove an interface from reflection or mirroring
11711 + * @mc_io: Pointer to MC portal's I/O object
11712 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11713 + * @token: Token of DPSW object
11714 + * @if_id: Interface Identifier
11715 + * @cfg: Reflection configuration
11716 + *
11717 + * Return: Completion status. '0' on Success; Error code otherwise.
11718 + */
11719 +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
11720 + u32 cmd_flags,
11721 + u16 token,
11722 + u16 if_id,
11723 + const struct dpsw_reflection_cfg *cfg)
11724 +{
11725 + struct mc_command cmd = { 0 };
11726 + struct dpsw_cmd_if_reflection *cmd_params;
11727 +
11728 + /* prepare command */
11729 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
11730 + cmd_flags,
11731 + token);
11732 + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
11733 + cmd_params->if_id = cpu_to_le16(if_id);
11734 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
11735 + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
11736 +
11737 + /* send command to mc*/
11738 + return mc_send_command(mc_io, &cmd);
11739 +}
11740 +
11741 +/**
11742 + * dpsw_if_set_flooding_metering() - Set flooding metering
11743 + * @mc_io: Pointer to MC portal's I/O object
11744 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11745 + * @token: Token of DPSW object
11746 + * @if_id: Interface Identifier
11747 + * @cfg: Metering parameters
11748 + *
11749 + * Return: Completion status. '0' on Success; Error code otherwise.
11750 + */
11751 +int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
11752 + u32 cmd_flags,
11753 + u16 token,
11754 + u16 if_id,
11755 + const struct dpsw_metering_cfg *cfg)
11756 +{
11757 + struct mc_command cmd = { 0 };
11758 + struct dpsw_cmd_if_set_flooding_metering *cmd_params;
11759 +
11760 + /* prepare command */
11761 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
11762 + cmd_flags,
11763 + token);
11764 + cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params;
11765 + cmd_params->if_id = cpu_to_le16(if_id);
11766 + dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
11767 + dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
11768 + cmd_params->cir = cpu_to_le32(cfg->cir);
11769 + cmd_params->eir = cpu_to_le32(cfg->eir);
11770 + cmd_params->cbs = cpu_to_le32(cfg->cbs);
11771 + cmd_params->ebs = cpu_to_le32(cfg->ebs);
11772 +
11773 + /* send command to mc*/
11774 + return mc_send_command(mc_io, &cmd);
11775 +}
11776 +
11777 +/**
11778 + * dpsw_if_set_metering() - Set interface metering for a traffic class
11779 + * @mc_io: Pointer to MC portal's I/O object
11780 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11781 + * @token: Token of DPSW object
11782 + * @if_id: Interface Identifier
11783 + * @tc_id: Traffic class ID
11784 + * @cfg: Metering parameters
11785 + *
11786 + * Return: Completion status. '0' on Success; Error code otherwise.
11787 + */
11788 +int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
11789 + u32 cmd_flags,
11790 + u16 token,
11791 + u16 if_id,
11792 + u8 tc_id,
11793 + const struct dpsw_metering_cfg *cfg)
11794 +{
11795 + struct mc_command cmd = { 0 };
11796 + struct dpsw_cmd_if_set_metering *cmd_params;
11797 +
11798 + /* prepare command */
11799 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
11800 + cmd_flags,
11801 + token);
11802 + cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params;
11803 + cmd_params->if_id = cpu_to_le16(if_id);
11804 + cmd_params->tc_id = tc_id;
11805 + dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
11806 + dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
11807 + cmd_params->cir = cpu_to_le32(cfg->cir);
11808 + cmd_params->eir = cpu_to_le32(cfg->eir);
11809 + cmd_params->cbs = cpu_to_le32(cfg->cbs);
11810 + cmd_params->ebs = cpu_to_le32(cfg->ebs);
11811 +
11812 + /* send command to mc*/
11813 + return mc_send_command(mc_io, &cmd);
11814 +}
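/*
 * Illustrative sketch, not part of the original patch: apply a policer to
 * flooded traffic on one interface. The enum names
 * DPSW_METERING_MODE_RFC2698 and DPSW_METERING_UNIT_BYTES are assumed from
 * dpsw.h, and the CIR/CBS figures are arbitrary example values whose exact
 * semantics follow the MC firmware definition for the chosen units.
 */
static int example_limit_flooding(struct fsl_mc_io *mc_io, u16 token, u16 if_id)
{
	struct dpsw_metering_cfg cfg = {
		.mode	= DPSW_METERING_MODE_RFC2698,
		.units	= DPSW_METERING_UNIT_BYTES,
		.cir	= 100000,	/* committed information rate */
		.cbs	= 64000,	/* committed burst size */
		.eir	= 0,
		.ebs	= 0,
	};

	return dpsw_if_set_flooding_metering(mc_io, 0, token, if_id, &cfg);
}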
11815 +
11816 +/**
11817 + * dpsw_prepare_early_drop() - Prepare an early-drop configuration for an interface
11818 + * @cfg: Early-drop configuration
11819 + * @early_drop_buf: Zeroed 256-byte buffer, to be mapped for DMA afterwards
11820 + *
11821 + * This function has to be called before dpsw_if_set_early_drop()
11822 + *
11823 + */
11824 +void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
11825 + u8 *early_drop_buf)
11826 +{
11827 + struct dpsw_prep_early_drop *ext_params;
11828 +
11829 + ext_params = (struct dpsw_prep_early_drop *)early_drop_buf;
11830 + dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode);
11831 + dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units);
11832 + ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold);
11833 + ext_params->green_drop_probability = cfg->green.drop_probability;
11834 + ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
11835 + ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
11836 + ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
11837 + ext_params->yellow_max_threshold =
11838 + cpu_to_le64(cfg->yellow.max_threshold);
11839 + ext_params->yellow_min_threshold =
11840 + cpu_to_le64(cfg->yellow.min_threshold);
11841 +}
11842 +
11843 +/**
11844 + * dpsw_if_set_early_drop() - Set interface traffic class early-drop
11845 + * configuration
11846 + * @mc_io: Pointer to MC portal's I/O object
11847 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11848 + * @token: Token of DPSW object
11849 + * @if_id: Interface Identifier
11850 + * @tc_id: Traffic class selection (0-7)
11851 + * @early_drop_iova: I/O virtual address of 64 bytes;
11852 + * Must be cacheline-aligned and DMA-able memory
11853 + *
11854 + * warning: Before calling this function, call dpsw_prepare_early_drop()
11855 + * to prepare the early_drop_iova parameter
11856 + *
11857 + * Return: '0' on Success; error code otherwise.
11858 + */
11859 +int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
11860 + u32 cmd_flags,
11861 + u16 token,
11862 + u16 if_id,
11863 + u8 tc_id,
11864 + u64 early_drop_iova)
11865 +{
11866 + struct mc_command cmd = { 0 };
11867 + struct dpsw_cmd_if_set_early_drop *cmd_params;
11868 +
11869 + /* prepare command */
11870 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
11871 + cmd_flags,
11872 + token);
11873 + cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params;
11874 + cmd_params->tc_id = tc_id;
11875 + cmd_params->if_id = cpu_to_le16(if_id);
11876 + cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
11877 +
11878 + /* send command to mc*/
11879 + return mc_send_command(mc_io, &cmd);
11880 +}
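/*
 * Illustrative sketch, not part of the original patch: the prepare -> DMA-map
 * -> set sequence for early drop on one traffic class. It assumes
 * <linux/slab.h> and <linux/dma-mapping.h> are available, 'dev' is the
 * DMA-capable device backing the DPSW, and DPSW_EARLY_DROP_MODE_WRED /
 * DPSW_EARLY_DROP_UNIT_BYTE are the enum names defined in dpsw.h.
 */
static int example_set_wred(struct fsl_mc_io *mc_io, u16 token,
			    struct device *dev, u16 if_id, u8 tc_id)
{
	struct dpsw_early_drop_cfg cfg = { 0 };
	dma_addr_t iova;
	u8 *buf;
	int err;

	cfg.drop_mode = DPSW_EARLY_DROP_MODE_WRED;
	cfg.units = DPSW_EARLY_DROP_UNIT_BYTE;
	cfg.green.min_threshold = 64 * 1024;
	cfg.green.max_threshold = 256 * 1024;
	cfg.green.drop_probability = 50;
	cfg.yellow.min_threshold = 64 * 1024;
	cfg.yellow.max_threshold = 256 * 1024;
	cfg.yellow.drop_probability = 50;

	buf = kzalloc(256, GFP_KERNEL);		/* zeroed, as required above */
	if (!buf)
		return -ENOMEM;

	dpsw_prepare_early_drop(&cfg, buf);

	iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, iova)) {
		kfree(buf);
		return -ENOMEM;
	}

	err = dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id, iova);

	dma_unmap_single(dev, iova, 256, DMA_TO_DEVICE);
	kfree(buf);
	return err;
}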
11881 +
11882 +/**
11883 + * dpsw_add_custom_tpid() - Configure an additional TPID (Ethernet type) value
11884 + * @mc_io: Pointer to MC portal's I/O object
11885 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11886 + * @token: Token of DPSW object
11887 + * @cfg: Tag Protocol identifier
11888 + *
11889 + * Configures a distinct Ethernet type (TPID) value used to
11890 + * recognize a VLAN tag, in addition to the common
11891 + * TPID values 0x8100 and 0x88A8.
11892 + * Two additional TPIDs are supported.
11893 + *
11894 + * Return: Completion status. '0' on Success; Error code otherwise.
11895 + */
11896 +int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
11897 + u32 cmd_flags,
11898 + u16 token,
11899 + const struct dpsw_custom_tpid_cfg *cfg)
11900 +{
11901 + struct mc_command cmd = { 0 };
11902 + struct dpsw_cmd_custom_tpid *cmd_params;
11903 +
11904 + /* prepare command */
11905 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
11906 + cmd_flags,
11907 + token);
11908 + cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
11909 + cmd_params->tpid = cpu_to_le16(cfg->tpid);
11910 +
11911 + /* send command to mc*/
11912 + return mc_send_command(mc_io, &cmd);
11913 +}
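/*
 * Illustrative sketch, not part of the original patch: accept frames tagged
 * with a vendor TPID (0x9100 is only an example value) in addition to the
 * standard 0x8100/0x88A8 values.
 */
static int example_add_vendor_tpid(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpsw_custom_tpid_cfg tpid_cfg = { .tpid = 0x9100 };

	return dpsw_add_custom_tpid(mc_io, 0, token, &tpid_cfg);
}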
11914 +
11915 +/**
11916 + * dpsw_remove_custom_tpid() - Remove a previously configured custom TPID value
11917 + * @mc_io: Pointer to MC portal's I/O object
11918 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11919 + * @token: Token of DPSW object
11920 + * @cfg: Tag Protocol identifier
11921 + *
11922 + * Return: Completion status. '0' on Success; Error code otherwise.
11923 + */
11924 +int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
11925 + u32 cmd_flags,
11926 + u16 token,
11927 + const struct dpsw_custom_tpid_cfg *cfg)
11928 +{
11929 + struct mc_command cmd = { 0 };
11930 + struct dpsw_cmd_custom_tpid *cmd_params;
11931 +
11932 + /* prepare command */
11933 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
11934 + cmd_flags,
11935 + token);
11936 + cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
11937 + cmd_params->tpid = cpu_to_le16(cfg->tpid);
11938 +
11939 + /* send command to mc*/
11940 + return mc_send_command(mc_io, &cmd);
11941 +}
11942 +
11943 +/**
11944 + * dpsw_if_enable() - Enable Interface
11945 + * @mc_io: Pointer to MC portal's I/O object
11946 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11947 + * @token: Token of DPSW object
11948 + * @if_id: Interface Identifier
11949 + *
11950 + * Return: Completion status. '0' on Success; Error code otherwise.
11951 + */
11952 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
11953 + u32 cmd_flags,
11954 + u16 token,
11955 + u16 if_id)
11956 +{
11957 + struct mc_command cmd = { 0 };
11958 + struct dpsw_cmd_if *cmd_params;
11959 +
11960 + /* prepare command */
11961 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
11962 + cmd_flags,
11963 + token);
11964 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
11965 + cmd_params->if_id = cpu_to_le16(if_id);
11966 +
11967 + /* send command to mc*/
11968 + return mc_send_command(mc_io, &cmd);
11969 +}
11970 +
11971 +/**
11972 + * dpsw_if_disable() - Disable Interface
11973 + * @mc_io: Pointer to MC portal's I/O object
11974 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11975 + * @token: Token of DPSW object
11976 + * @if_id: Interface Identifier
11977 + *
11978 + * Return: Completion status. '0' on Success; Error code otherwise.
11979 + */
11980 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
11981 + u32 cmd_flags,
11982 + u16 token,
11983 + u16 if_id)
11984 +{
11985 + struct mc_command cmd = { 0 };
11986 + struct dpsw_cmd_if *cmd_params;
11987 +
11988 + /* prepare command */
11989 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
11990 + cmd_flags,
11991 + token);
11992 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
11993 + cmd_params->if_id = cpu_to_le16(if_id);
11994 +
11995 + /* send command to mc*/
11996 + return mc_send_command(mc_io, &cmd);
11997 +}
11998 +
11999 +/**
12000 + * dpsw_if_get_attributes() - Get interface attributes
12001 + * @mc_io: Pointer to MC portal's I/O object
12002 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12003 + * @token: Token of DPSW object
12004 + * @if_id: Interface Identifier
12005 + * @attr: Returned interface attributes
12006 + *
12007 + * Return: Completion status. '0' on Success; Error code otherwise.
12008 + */
12009 +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
12010 + u32 cmd_flags,
12011 + u16 token,
12012 + u16 if_id,
12013 + struct dpsw_if_attr *attr)
12014 +{
12015 + struct dpsw_rsp_if_get_attr *rsp_params;
12016 + struct dpsw_cmd_if *cmd_params;
12017 + struct mc_command cmd = { 0 };
12018 + int err;
12019 +
12020 + /* prepare command */
12021 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
12022 + cmd_flags,
12023 + token);
12024 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
12025 + cmd_params->if_id = cpu_to_le16(if_id);
12026 +
12027 + /* send command to mc*/
12028 + err = mc_send_command(mc_io, &cmd);
12029 + if (err)
12030 + return err;
12031 +
12032 + /* retrieve response parameters */
12033 + rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
12034 + attr->num_tcs = rsp_params->num_tcs;
12035 + attr->rate = le32_to_cpu(rsp_params->rate);
12036 + attr->options = le32_to_cpu(rsp_params->options);
12037 + attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
12038 + attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
12039 + ACCEPT_ALL_VLAN);
12040 + attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED);
12041 + attr->qdid = le16_to_cpu(rsp_params->qdid);
12042 +
12043 + return 0;
12044 +}
12045 +
12046 +/**
12047 + * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
12048 + * @mc_io: Pointer to MC portal's I/O object
12049 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12050 + * @token: Token of DPSW object
12051 + * @if_id: Interface Identifier
12052 + * @frame_length: Maximum Frame Length
12053 + *
12054 + * Return: Completion status. '0' on Success; Error code otherwise.
12055 + */
12056 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
12057 + u32 cmd_flags,
12058 + u16 token,
12059 + u16 if_id,
12060 + u16 frame_length)
12061 +{
12062 + struct mc_command cmd = { 0 };
12063 + struct dpsw_cmd_if_set_max_frame_length *cmd_params;
12064 +
12065 + /* prepare command */
12066 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
12067 + cmd_flags,
12068 + token);
12069 + cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
12070 + cmd_params->if_id = cpu_to_le16(if_id);
12071 + cmd_params->frame_length = cpu_to_le16(frame_length);
12072 +
12073 + /* send command to mc*/
12074 + return mc_send_command(mc_io, &cmd);
12075 +}
12076 +
12077 +/**
12078 + * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
12079 + * @mc_io: Pointer to MC portal's I/O object
12080 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12081 + * @token: Token of DPSW object
12082 + * @if_id: Interface Identifier
12083 + * @frame_length: Returned maximum Frame Length
12084 + *
12085 + * Return: Completion status. '0' on Success; Error code otherwise.
12086 + */
12087 +int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
12088 + u32 cmd_flags,
12089 + u16 token,
12090 + u16 if_id,
12091 + u16 *frame_length)
12092 +{
12093 + struct mc_command cmd = { 0 };
12094 + struct dpsw_cmd_if_get_max_frame_length *cmd_params;
12095 + struct dpsw_rsp_if_get_max_frame_length *rsp_params;
12096 + int err;
12097 +
12098 + /* prepare command */
12099 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
12100 + cmd_flags,
12101 + token);
12102 + cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params;
12103 + cmd_params->if_id = cpu_to_le16(if_id);
12104 +
12105 + /* send command to mc*/
12106 + err = mc_send_command(mc_io, &cmd);
12107 + if (err)
12108 + return err;
12109 +
12110 + rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params;
12111 + *frame_length = le16_to_cpu(rsp_params->frame_length);
12112 +
12113 + return 0;
12114 +}
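/*
 * Illustrative sketch, not part of the original patch: raise the maximum
 * receive frame length on an interface to carry ~4 KiB jumbo frames, then
 * read the value back to confirm what the MC firmware accepted.
 */
static int example_enable_jumbo(struct fsl_mc_io *mc_io, u16 token, u16 if_id,
				u16 *accepted_len)
{
	int err;

	err = dpsw_if_set_max_frame_length(mc_io, 0, token, if_id, 4096);
	if (err)
		return err;

	return dpsw_if_get_max_frame_length(mc_io, 0, token, if_id,
					    accepted_len);
}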
12115 +
12116 +/**
12117 + * dpsw_vlan_add() - Add a new VLAN to the DPSW.
12118 + * @mc_io: Pointer to MC portal's I/O object
12119 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12120 + * @token: Token of DPSW object
12121 + * @vlan_id: VLAN Identifier
12122 + * @cfg: VLAN configuration
12123 + *
12124 + * Only the VLAN ID and FDB ID are required parameters here.
12125 + * The 12-bit VLAN ID is defined in IEEE 802.1Q.
12126 + * Adding a duplicate VLAN ID is not allowed.
12127 + * An FDB ID can be shared across multiple VLANs. Shared learning
12128 + * is obtained by calling dpsw_vlan_add() for multiple VLAN IDs
12129 + * with the same fdb_id.
12130 + *
12131 + * Return: Completion status. '0' on Success; Error code otherwise.
12132 + */
12133 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
12134 + u32 cmd_flags,
12135 + u16 token,
12136 + u16 vlan_id,
12137 + const struct dpsw_vlan_cfg *cfg)
12138 +{
12139 + struct mc_command cmd = { 0 };
12140 + struct dpsw_vlan_add *cmd_params;
12141 +
12142 + /* prepare command */
12143 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
12144 + cmd_flags,
12145 + token);
12146 + cmd_params = (struct dpsw_vlan_add *)cmd.params;
12147 + cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
12148 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12149 +
12150 + /* send command to mc*/
12151 + return mc_send_command(mc_io, &cmd);
12152 +}
12153 +
12154 +/**
12155 + * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
12156 + * @mc_io: Pointer to MC portal's I/O object
12157 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12158 + * @token: Token of DPSW object
12159 + * @vlan_id: VLAN Identifier
12160 + * @cfg: Set of interfaces to add
12161 + *
12162 + * Only interfaces that do not yet belong to this VLAN can be
12163 + * added; otherwise an error is generated and the entire command
12164 + * is ignored. This function can be called repeatedly, each time
12165 + * providing only the delta of interfaces to add.
12166 + *
12167 + * Return: Completion status. '0' on Success; Error code otherwise.
12168 + */
12169 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
12170 + u32 cmd_flags,
12171 + u16 token,
12172 + u16 vlan_id,
12173 + const struct dpsw_vlan_if_cfg *cfg)
12174 +{
12175 + struct mc_command cmd = { 0 };
12176 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12177 +
12178 + /* prepare command */
12179 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
12180 + cmd_flags,
12181 + token);
12182 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12183 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12184 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12185 +
12186 + /* send command to mc*/
12187 + return mc_send_command(mc_io, &cmd);
12188 +}
12189 +
12190 +/**
12191 + * dpsw_vlan_add_if_untagged() - Define a set of interfaces that should be
12192 + * transmitted as untagged.
12193 + * @mc_io: Pointer to MC portal's I/O object
12194 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12195 + * @token: Token of DPSW object
12196 + * @vlan_id: VLAN Identifier
12197 + * @cfg: Set of interfaces that should be transmitted as untagged
12198 + *
12199 + * These interfaces must already belong to this VLAN.
12200 + * By default, all interfaces transmit frames as tagged.
12201 + * Providing a non-existing interface, or an interface that is
12202 + * already configured as untagged, generates an error and the
12203 + * entire command is ignored.
12204 + *
12205 + * Return: Completion status. '0' on Success; Error code otherwise.
12206 + */
12207 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
12208 + u32 cmd_flags,
12209 + u16 token,
12210 + u16 vlan_id,
12211 + const struct dpsw_vlan_if_cfg *cfg)
12212 +{
12213 + struct mc_command cmd = { 0 };
12214 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12215 +
12216 + /* prepare command */
12217 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
12218 + cmd_flags,
12219 + token);
12220 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12221 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12222 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12223 +
12224 + /* send command to mc*/
12225 + return mc_send_command(mc_io, &cmd);
12226 +}
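/*
 * Illustrative sketch, not part of the original patch: create VLAN 100 on an
 * existing FDB, add interfaces 0 and 1 as members, and let interface 1
 * transmit the VLAN untagged (an access-port style setup). 'fdb_id' would
 * typically come from the switch attributes or from dpsw_fdb_add().
 */
static int example_setup_vlan(struct fsl_mc_io *mc_io, u16 token, u16 fdb_id)
{
	struct dpsw_vlan_cfg vcfg = { .fdb_id = fdb_id };
	struct dpsw_vlan_if_cfg members = { .num_ifs = 2, .if_id = { 0, 1 } };
	struct dpsw_vlan_if_cfg untagged = { .num_ifs = 1, .if_id = { 1 } };
	int err;

	err = dpsw_vlan_add(mc_io, 0, token, 100, &vcfg);
	if (err)
		return err;

	err = dpsw_vlan_add_if(mc_io, 0, token, 100, &members);
	if (err)
		return err;

	return dpsw_vlan_add_if_untagged(mc_io, 0, token, 100, &untagged);
}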
12227 +
12228 +/**
12229 + * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
12230 + * included in flooding when a frame with an unknown destination
12231 + * unicast MAC address arrives.
12232 + * @mc_io: Pointer to MC portal's I/O object
12233 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12234 + * @token: Token of DPSW object
12235 + * @vlan_id: VLAN Identifier
12236 + * @cfg: Set of interfaces that should be used for flooding
12237 + *
12238 + * These interfaces must belong to this VLAN. By default, all
12239 + * interfaces are included in the flooding list. Providing a
12240 + * non-existing interface, or an interface that is already in the
12241 + * flooding list, generates an error and the entire command is
12242 + * ignored.
12243 + *
12244 + * Return: Completion status. '0' on Success; Error code otherwise.
12245 + */
12246 +int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
12247 + u32 cmd_flags,
12248 + u16 token,
12249 + u16 vlan_id,
12250 + const struct dpsw_vlan_if_cfg *cfg)
12251 +{
12252 + struct mc_command cmd = { 0 };
12253 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12254 +
12255 + /* prepare command */
12256 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
12257 + cmd_flags,
12258 + token);
12259 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12260 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12261 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12262 +
12263 + /* send command to mc*/
12264 + return mc_send_command(mc_io, &cmd);
12265 +}
12266 +
12267 +/**
12268 + * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
12269 + * @mc_io: Pointer to MC portal's I/O object
12270 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12271 + * @token: Token of DPSW object
12272 + * @vlan_id: VLAN Identifier
12273 + * @cfg: Set of interfaces that should be removed
12274 + *
12275 + * Interfaces must belong to this VLAN, otherwise an error
12276 + * is returned and the command is ignored.
12277 + *
12278 + * Return: Completion status. '0' on Success; Error code otherwise.
12279 + */
12280 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
12281 + u32 cmd_flags,
12282 + u16 token,
12283 + u16 vlan_id,
12284 + const struct dpsw_vlan_if_cfg *cfg)
12285 +{
12286 + struct mc_command cmd = { 0 };
12287 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12288 +
12289 + /* prepare command */
12290 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
12291 + cmd_flags,
12292 + token);
12293 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12294 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12295 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12296 +
12297 + /* send command to mc*/
12298 + return mc_send_command(mc_io, &cmd);
12299 +}
12300 +
12301 +/**
12302 + * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
12303 + * converted from transmitting untagged to transmitting tagged.
12304 + * @mc_io: Pointer to MC portal's I/O object
12305 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12306 + * @token: Token of DPSW object
12307 + * @vlan_id: VLAN Identifier
12308 + * @cfg: Set of interfaces that should be removed
12309 + *
12310 + * Interfaces provided to this API must belong to this VLAN and
12311 + * be configured as untagged; otherwise an error is returned and
12312 + * the command is ignored.
12313 + *
12314 + * Return: Completion status. '0' on Success; Error code otherwise.
12315 + */
12316 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
12317 + u32 cmd_flags,
12318 + u16 token,
12319 + u16 vlan_id,
12320 + const struct dpsw_vlan_if_cfg *cfg)
12321 +{
12322 + struct mc_command cmd = { 0 };
12323 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12324 +
12325 + /* prepare command */
12326 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
12327 + cmd_flags,
12328 + token);
12329 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12330 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12331 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12332 +
12333 + /* send command to mc*/
12334 + return mc_send_command(mc_io, &cmd);
12335 +}
12336 +
12337 +/**
12338 + * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
12339 + * removed from the flooding list.
12340 + * @mc_io: Pointer to MC portal's I/O object
12341 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12342 + * @token: Token of DPSW object
12343 + * @vlan_id: VLAN Identifier
12344 + * @cfg: Set of interfaces used for flooding
12345 + *
12346 + * Return: Completion status. '0' on Success; Error code otherwise.
12347 + */
12348 +int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
12349 + u32 cmd_flags,
12350 + u16 token,
12351 + u16 vlan_id,
12352 + const struct dpsw_vlan_if_cfg *cfg)
12353 +{
12354 + struct mc_command cmd = { 0 };
12355 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12356 +
12357 + /* prepare command */
12358 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
12359 + cmd_flags,
12360 + token);
12361 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12362 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12363 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12364 +
12365 + /* send command to mc*/
12366 + return mc_send_command(mc_io, &cmd);
12367 +}
12368 +
12369 +/**
12370 + * dpsw_vlan_remove() - Remove an entire VLAN
12371 + * @mc_io: Pointer to MC portal's I/O object
12372 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12373 + * @token: Token of DPSW object
12374 + * @vlan_id: VLAN Identifier
12375 + *
12376 + * Return: Completion status. '0' on Success; Error code otherwise.
12377 + */
12378 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
12379 + u32 cmd_flags,
12380 + u16 token,
12381 + u16 vlan_id)
12382 +{
12383 + struct mc_command cmd = { 0 };
12384 + struct dpsw_cmd_vlan_remove *cmd_params;
12385 +
12386 + /* prepare command */
12387 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
12388 + cmd_flags,
12389 + token);
12390 + cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
12391 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12392 +
12393 + /* send command to mc*/
12394 + return mc_send_command(mc_io, &cmd);
12395 +}
12396 +
12397 +/**
12398 + * dpsw_vlan_get_attributes() - Get VLAN attributes
12399 + * @mc_io: Pointer to MC portal's I/O object
12400 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12401 + * @token: Token of DPSW object
12402 + * @vlan_id: VLAN Identifier
12403 + * @attr: Returned VLAN attributes
12404 + *
12405 + * Return: Completion status. '0' on Success; Error code otherwise.
12406 + */
12407 +int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
12408 + u32 cmd_flags,
12409 + u16 token,
12410 + u16 vlan_id,
12411 + struct dpsw_vlan_attr *attr)
12412 +{
12413 + struct mc_command cmd = { 0 };
12414 + struct dpsw_cmd_vlan_get_attr *cmd_params;
12415 + struct dpsw_rsp_vlan_get_attr *rsp_params;
12416 + int err;
12417 +
12418 + /* prepare command */
12419 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
12420 + cmd_flags,
12421 + token);
12422 + cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
12423 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12424 +
12425 + /* send command to mc*/
12426 + err = mc_send_command(mc_io, &cmd);
12427 + if (err)
12428 + return err;
12429 +
12430 + /* retrieve response parameters */
12431 + rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
12432 + attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
12433 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12434 + attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
12435 + attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
12436 +
12437 + return 0;
12438 +}
12439 +
12440 +/**
12441 + * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
12442 + * @mc_io: Pointer to MC portal's I/O object
12443 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12444 + * @token: Token of DPSW object
12445 + * @vlan_id: VLAN Identifier
12446 + * @cfg: Returned set of interfaces belonging to this VLAN
12447 + *
12448 + * Return: Completion status. '0' on Success; Error code otherwise.
12449 + */
12450 +int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
12451 + u32 cmd_flags,
12452 + u16 token,
12453 + u16 vlan_id,
12454 + struct dpsw_vlan_if_cfg *cfg)
12455 +{
12456 + struct mc_command cmd = { 0 };
12457 + struct dpsw_cmd_vlan_get_if *cmd_params;
12458 + struct dpsw_rsp_vlan_get_if *rsp_params;
12459 + int err;
12460 +
12461 + /* prepare command */
12462 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
12463 + cmd_flags,
12464 + token);
12465 + cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
12466 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12467 +
12468 + /* send command to mc*/
12469 + err = mc_send_command(mc_io, &cmd);
12470 + if (err)
12471 + return err;
12472 +
12473 + /* retrieve response parameters */
12474 + rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
12475 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12476 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12477 +
12478 + return 0;
12479 +}
12480 +
12481 +/**
12482 + * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
12483 + * @mc_io: Pointer to MC portal's I/O object
12484 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12485 + * @token: Token of DPSW object
12486 + * @vlan_id: VLAN Identifier
12487 + * @cfg: Returned set of flooding interfaces
12488 + *
12489 + * Return: Completion status. '0' on Success; Error code otherwise.
12490 + */
12491 +
12492 +int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
12493 + u32 cmd_flags,
12494 + u16 token,
12495 + u16 vlan_id,
12496 + struct dpsw_vlan_if_cfg *cfg)
12497 +{
12498 + struct mc_command cmd = { 0 };
12499 + struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
12500 + struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
12501 + int err;
12502 +
12503 + /* prepare command */
12504 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
12505 + cmd_flags,
12506 + token);
12507 + cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
12508 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12509 +
12510 + /* send command to mc*/
12511 + err = mc_send_command(mc_io, &cmd);
12512 + if (err)
12513 + return err;
12514 +
12515 + /* retrieve response parameters */
12516 + rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
12517 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12518 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12519 +
12520 + return 0;
12521 +}
12522 +
12523 +/**
12524 + * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
12525 + * untagged
12526 + * @mc_io: Pointer to MC portal's I/O object
12527 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12528 + * @token: Token of DPSW object
12529 + * @vlan_id: VLAN Identifier
12530 + * @cfg: Returned set of untagged interfaces
12531 + *
12532 + * Return: Completion status. '0' on Success; Error code otherwise.
12533 + */
12534 +int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
12535 + u32 cmd_flags,
12536 + u16 token,
12537 + u16 vlan_id,
12538 + struct dpsw_vlan_if_cfg *cfg)
12539 +{
12540 + struct mc_command cmd = { 0 };
12541 + struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
12542 + struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
12543 + int err;
12544 +
12545 + /* prepare command */
12546 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
12547 + cmd_flags,
12548 + token);
12549 + cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
12550 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12551 +
12552 + /* send command to mc*/
12553 + err = mc_send_command(mc_io, &cmd);
12554 + if (err)
12555 + return err;
12556 +
12557 + /* retrieve response parameters */
12558 + rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
12559 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12560 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12561 +
12562 + return 0;
12563 +}
12564 +
12565 +/**
12566 + * dpsw_fdb_add() - Add an FDB to the switch and return a handle to the
12567 + * FDB table for future reference
12568 + * @mc_io: Pointer to MC portal's I/O object
12569 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12570 + * @token: Token of DPSW object
12571 + * @fdb_id: Returned Forwarding Database Identifier
12572 + * @cfg: FDB Configuration
12573 + *
12574 + * Return: Completion status. '0' on Success; Error code otherwise.
12575 + */
12576 +int dpsw_fdb_add(struct fsl_mc_io *mc_io,
12577 + u32 cmd_flags,
12578 + u16 token,
12579 + u16 *fdb_id,
12580 + const struct dpsw_fdb_cfg *cfg)
12581 +{
12582 + struct mc_command cmd = { 0 };
12583 + struct dpsw_cmd_fdb_add *cmd_params;
12584 + struct dpsw_rsp_fdb_add *rsp_params;
12585 + int err;
12586 +
12587 + /* prepare command */
12588 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
12589 + cmd_flags,
12590 + token);
12591 + cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
12592 + cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
12593 + cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
12594 +
12595 + /* send command to mc*/
12596 + err = mc_send_command(mc_io, &cmd);
12597 + if (err)
12598 + return err;
12599 +
12600 + /* retrieve response parameters */
12601 + rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
12602 + *fdb_id = le16_to_cpu(rsp_params->fdb_id);
12603 +
12604 + return 0;
12605 +}
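/*
 * Illustrative sketch, not part of the original patch: create a private FDB
 * sized for 1024 MAC entries with an aging time of 300 (assumed to be in
 * seconds, per the dpsw.h description of fdb_aging_time). The returned
 * fdb_id can then be passed to dpsw_vlan_add() so that one or more VLANs
 * share this forwarding database.
 */
static int example_create_fdb(struct fsl_mc_io *mc_io, u16 token, u16 *fdb_id)
{
	struct dpsw_fdb_cfg cfg = {
		.num_fdb_entries = 1024,
		.fdb_aging_time = 300,
	};

	return dpsw_fdb_add(mc_io, 0, token, fdb_id, &cfg);
}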
12606 +
12607 +/**
12608 + * dpsw_fdb_remove() - Remove FDB from switch
12609 + * @mc_io: Pointer to MC portal's I/O object
12610 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12611 + * @token: Token of DPSW object
12612 + * @fdb_id: Forwarding Database Identifier
12613 + *
12614 + * Return: Completion status. '0' on Success; Error code otherwise.
12615 + */
12616 +int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
12617 + u32 cmd_flags,
12618 + u16 token,
12619 + u16 fdb_id)
12620 +{
12621 + struct mc_command cmd = { 0 };
12622 + struct dpsw_cmd_fdb_remove *cmd_params;
12623 +
12624 + /* prepare command */
12625 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
12626 + cmd_flags,
12627 + token);
12628 + cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
12629 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12630 +
12631 + /* send command to mc*/
12632 + return mc_send_command(mc_io, &cmd);
12633 +}
12634 +
12635 +/**
12636 + * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
12637 + * @mc_io: Pointer to MC portal's I/O object
12638 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12639 + * @token: Token of DPSW object
12640 + * @fdb_id: Forwarding Database Identifier
12641 + * @cfg: Unicast entry configuration
12642 + *
12643 + * Return: Completion status. '0' on Success; Error code otherwise.
12644 + */
12645 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
12646 + u32 cmd_flags,
12647 + u16 token,
12648 + u16 fdb_id,
12649 + const struct dpsw_fdb_unicast_cfg *cfg)
12650 +{
12651 + struct mc_command cmd = { 0 };
12652 + struct dpsw_cmd_fdb_add_unicast *cmd_params;
12653 + int i;
12654 +
12655 + /* prepare command */
12656 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
12657 + cmd_flags,
12658 + token);
12659 + cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
12660 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12661 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
12662 + for (i = 0; i < 6; i++)
12663 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12664 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12665 +
12666 + /* send command to mc*/
12667 + return mc_send_command(mc_io, &cmd);
12668 +}
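/*
 * Illustrative sketch, not part of the original patch: pin a known station
 * to egress interface 2 with a static entry. DPSW_FDB_ENTRY_STATIC is the
 * assumed enum dpsw_fdb_entry_type name from dpsw.h; note that the wrapper
 * above already performs the byte-order reversal of the MAC address.
 */
static int example_add_static_mac(struct fsl_mc_io *mc_io, u16 token,
				  u16 fdb_id)
{
	struct dpsw_fdb_unicast_cfg ucfg = {
		.mac_addr = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 },
		.if_egress = 2,
		.type = DPSW_FDB_ENTRY_STATIC,
	};

	return dpsw_fdb_add_unicast(mc_io, 0, token, fdb_id, &ucfg);
}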
12669 +
12670 +/**
12671 + * dpsw_fdb_get_unicast() - Get a unicast entry from the MAC lookup table by
12672 + * its unicast Ethernet address
12673 + * @mc_io: Pointer to MC portal's I/O object
12674 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12675 + * @token: Token of DPSW object
12676 + * @fdb_id: Forwarding Database Identifier
12677 + * @cfg: Returned unicast entry configuration
12678 + *
12679 + * Return: Completion status. '0' on Success; Error code otherwise.
12680 + */
12681 +int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
12682 + u32 cmd_flags,
12683 + u16 token,
12684 + u16 fdb_id,
12685 + struct dpsw_fdb_unicast_cfg *cfg)
12686 +{
12687 + struct mc_command cmd = { 0 };
12688 + struct dpsw_cmd_fdb_get_unicast *cmd_params;
12689 + struct dpsw_rsp_fdb_get_unicast *rsp_params;
12690 + int err, i;
12691 +
12692 + /* prepare command */
12693 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
12694 + cmd_flags,
12695 + token);
12696 + cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
12697 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12698 + for (i = 0; i < 6; i++)
12699 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12700 +
12701 + /* send command to mc*/
12702 + err = mc_send_command(mc_io, &cmd);
12703 + if (err)
12704 + return err;
12705 +
12706 + /* retrieve response parameters */
12707 + rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
12708 + cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
12709 + cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
12710 +
12711 + return 0;
12712 +}
12713 +
12714 +/**
12715 + * dpsw_fdb_remove_unicast() - Remove a unicast entry from the MAC lookup table
12716 + * @mc_io: Pointer to MC portal's I/O object
12717 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12718 + * @token: Token of DPSW object
12719 + * @fdb_id: Forwarding Database Identifier
12720 + * @cfg: Unicast entry configuration
12721 + *
12722 + * Return: Completion status. '0' on Success; Error code otherwise.
12723 + */
12724 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
12725 + u32 cmd_flags,
12726 + u16 token,
12727 + u16 fdb_id,
12728 + const struct dpsw_fdb_unicast_cfg *cfg)
12729 +{
12730 + struct mc_command cmd = { 0 };
12731 + struct dpsw_cmd_fdb_remove_unicast *cmd_params;
12732 + int i;
12733 +
12734 + /* prepare command */
12735 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
12736 + cmd_flags,
12737 + token);
12738 + cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
12739 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12740 + for (i = 0; i < 6; i++)
12741 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12742 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
12743 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12744 +
12745 + /* send command to mc*/
12746 + return mc_send_command(mc_io, &cmd);
12747 +}
12748 +
12749 +/**
12750 + * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast group
12751 + * @mc_io: Pointer to MC portal's I/O object
12752 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12753 + * @token: Token of DPSW object
12754 + * @fdb_id: Forwarding Database Identifier
12755 + * @cfg: Multicast entry configuration
12756 + *
12757 + * If the group doesn't exist, it will be created.
12758 + * Only interfaces that do not yet belong to this multicast group
12759 + * can be added; otherwise an error is generated and the command
12760 + * is ignored.
12761 + * This function may be called repeatedly, each time providing
12762 + * only the delta of interfaces to add.
12763 + *
12764 + * Return: Completion status. '0' on Success; Error code otherwise.
12765 + */
12766 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
12767 + u32 cmd_flags,
12768 + u16 token,
12769 + u16 fdb_id,
12770 + const struct dpsw_fdb_multicast_cfg *cfg)
12771 +{
12772 + struct mc_command cmd = { 0 };
12773 + struct dpsw_cmd_fdb_add_multicast *cmd_params;
12774 + int i;
12775 +
12776 + /* prepare command */
12777 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
12778 + cmd_flags,
12779 + token);
12780 + cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
12781 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12782 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
12783 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12784 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12785 + for (i = 0; i < 6; i++)
12786 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12787 +
12788 + /* send command to mc*/
12789 + return mc_send_command(mc_io, &cmd);
12790 +}
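/*
 * Illustrative sketch, not part of the original patch: forward a multicast
 * group to interfaces 0, 2 and 3. A later call only needs to list additional
 * interfaces (the delta), as described above. DPSW_FDB_ENTRY_STATIC is the
 * assumed enum name from dpsw.h.
 */
static int example_add_mc_group(struct fsl_mc_io *mc_io, u16 token, u16 fdb_id)
{
	struct dpsw_fdb_multicast_cfg mcfg = {
		.mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
		.num_ifs = 3,
		.if_id = { 0, 2, 3 },
		.type = DPSW_FDB_ENTRY_STATIC,
	};

	return dpsw_fdb_add_multicast(mc_io, 0, token, fdb_id, &mcfg);
}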
12791 +
12792 +/**
12793 + * dpsw_fdb_get_multicast() - Read a multicast group by its multicast Ethernet
12794 + * address.
12795 + * @mc_io: Pointer to MC portal's I/O object
12796 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12797 + * @token: Token of DPSW object
12798 + * @fdb_id: Forwarding Database Identifier
12799 + * @cfg: Returned multicast entry configuration
12800 + *
12801 + * Return: Completion status. '0' on Success; Error code otherwise.
12802 + */
12803 +int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
12804 + u32 cmd_flags,
12805 + u16 token,
12806 + u16 fdb_id,
12807 + struct dpsw_fdb_multicast_cfg *cfg)
12808 +{
12809 + struct mc_command cmd = { 0 };
12810 + struct dpsw_cmd_fdb_get_multicast *cmd_params;
12811 + struct dpsw_rsp_fdb_get_multicast *rsp_params;
12812 + int err, i;
12813 +
12814 + /* prepare command */
12815 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
12816 + cmd_flags,
12817 + token);
12818 + cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
12819 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12820 + for (i = 0; i < 6; i++)
12821 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12822 +
12823 + /* send command to mc*/
12824 + err = mc_send_command(mc_io, &cmd);
12825 + if (err)
12826 + return err;
12827 +
12828 + /* retrieve response parameters */
12829 + rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
12830 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12831 + cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
12832 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12833 +
12834 + return 0;
12835 +}
12836 +
12837 +/**
12838 + * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
12839 + * group.
12840 + * @mc_io: Pointer to MC portal's I/O object
12841 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12842 + * @token: Token of DPSW object
12843 + * @fdb_id: Forwarding Database Identifier
12844 + * @cfg: Multicast entry configuration
12845 + *
12846 + * Interfaces provided to this API have to exist in the group,
12847 + * otherwise an error is returned and the entire command is
12848 + * ignored. If no interface is left in the group,
12849 + * the entire group is deleted.
12850 + *
12851 + * Return: Completion status. '0' on Success; Error code otherwise.
12852 + */
12853 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
12854 + u32 cmd_flags,
12855 + u16 token,
12856 + u16 fdb_id,
12857 + const struct dpsw_fdb_multicast_cfg *cfg)
12858 +{
12859 + struct mc_command cmd = { 0 };
12860 + struct dpsw_cmd_fdb_remove_multicast *cmd_params;
12861 + int i;
12862 +
12863 + /* prepare command */
12864 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
12865 + cmd_flags,
12866 + token);
12867 + cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params;
12868 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12869 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
12870 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12871 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12872 + for (i = 0; i < 6; i++)
12873 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12874 +
12875 + /* send command to mc*/
12876 + return mc_send_command(mc_io, &cmd);
12877 +}
12878 +
12879 +/**
12880 + * dpsw_fdb_set_learning_mode() - Define FDB learning mode
12881 + * @mc_io: Pointer to MC portal's I/O object
12882 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12883 + * @token: Token of DPSW object
12884 + * @fdb_id: Forwarding Database Identifier
12885 + * @mode: Learning mode
12886 + *
12887 + * Return: Completion status. '0' on Success; Error code otherwise.
12888 + */
12889 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
12890 + u32 cmd_flags,
12891 + u16 token,
12892 + u16 fdb_id,
12893 + enum dpsw_fdb_learning_mode mode)
12894 +{
12895 + struct mc_command cmd = { 0 };
12896 + struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
12897 +
12898 + /* prepare command */
12899 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
12900 + cmd_flags,
12901 + token);
12902 + cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
12903 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12904 + dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
12905 +
12906 + /* send command to mc*/
12907 + return mc_send_command(mc_io, &cmd);
12908 +}
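/*
 * Illustrative sketch, not part of the original patch: turn off hardware
 * learning so that only entries installed via dpsw_fdb_add_unicast() /
 * dpsw_fdb_add_multicast() are used for forwarding decisions.
 * DPSW_FDB_LEARNING_MODE_DIS is the assumed enum name from dpsw.h.
 */
static int example_disable_learning(struct fsl_mc_io *mc_io, u16 token,
				    u16 fdb_id)
{
	return dpsw_fdb_set_learning_mode(mc_io, 0, token, fdb_id,
					  DPSW_FDB_LEARNING_MODE_DIS);
}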
12909 +
12910 +/**
12911 + * dpsw_fdb_get_attributes() - Get FDB attributes
12912 + * @mc_io: Pointer to MC portal's I/O object
12913 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12914 + * @token: Token of DPSW object
12915 + * @fdb_id: Forwarding Database Identifier
12916 + * @attr: Returned FDB attributes
12917 + *
12918 + * Return: Completion status. '0' on Success; Error code otherwise.
12919 + */
12920 +int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
12921 + u32 cmd_flags,
12922 + u16 token,
12923 + u16 fdb_id,
12924 + struct dpsw_fdb_attr *attr)
12925 +{
12926 + struct mc_command cmd = { 0 };
12927 + struct dpsw_cmd_fdb_get_attr *cmd_params;
12928 + struct dpsw_rsp_fdb_get_attr *rsp_params;
12929 + int err;
12930 +
12931 + /* prepare command */
12932 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
12933 + cmd_flags,
12934 + token);
12935 + cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params;
12936 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12937 +
12938 + /* send command to mc*/
12939 + err = mc_send_command(mc_io, &cmd);
12940 + if (err)
12941 + return err;
12942 +
12943 + /* retrieve response parameters */
12944 + rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params;
12945 + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
12946 + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
12947 + attr->learning_mode = dpsw_get_field(rsp_params->learning_mode,
12948 + LEARNING_MODE);
12949 + attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups);
12950 + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
12951 +
12952 + return 0;
12953 +}
12954 +
12955 +/**
12956 + * dpsw_acl_add() - Add an ACL to the L2 switch.
12957 + * @mc_io: Pointer to MC portal's I/O object
12958 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12959 + * @token: Token of DPSW object
12960 + * @acl_id: Returned ACL ID, for the future reference
12961 + * @cfg: ACL configuration
12962 + *
12963 + * Creates an Access Control List. Multiple ACLs can be created and
12964 + * co-exist in the L2 switch.
12965 + *
12966 + * Return: '0' on Success; Error code otherwise.
12967 + */
12968 +int dpsw_acl_add(struct fsl_mc_io *mc_io,
12969 + u32 cmd_flags,
12970 + u16 token,
12971 + u16 *acl_id,
12972 + const struct dpsw_acl_cfg *cfg)
12973 +{
12974 + struct mc_command cmd = { 0 };
12975 + struct dpsw_cmd_acl_add *cmd_params;
12976 + struct dpsw_rsp_acl_add *rsp_params;
12977 + int err;
12978 +
12979 + /* prepare command */
12980 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
12981 + cmd_flags,
12982 + token);
12983 + cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
12984 + cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
12985 +
12986 + /* send command to mc*/
12987 + err = mc_send_command(mc_io, &cmd);
12988 + if (err)
12989 + return err;
12990 +
12991 + /* retrieve response parameters */
12992 + rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
12993 + *acl_id = le16_to_cpu(rsp_params->acl_id);
12994 +
12995 + return 0;
12996 +}
12997 +
12998 +/**
12999 + * dpsw_acl_remove() - Remove an ACL from the L2 switch.
13000 + * @mc_io: Pointer to MC portal's I/O object
13001 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13002 + * @token: Token of DPSW object
13003 + * @acl_id: ACL ID
13004 + *
13005 + * Return: '0' on Success; Error code otherwise.
13006 + */
13007 +int dpsw_acl_remove(struct fsl_mc_io *mc_io,
13008 + u32 cmd_flags,
13009 + u16 token,
13010 + u16 acl_id)
13011 +{
13012 + struct mc_command cmd = { 0 };
13013 + struct dpsw_cmd_acl_remove *cmd_params;
13014 +
13015 + /* prepare command */
13016 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
13017 + cmd_flags,
13018 + token);
13019 + cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
13020 + cmd_params->acl_id = cpu_to_le16(acl_id);
13021 +
13022 + /* send command to mc*/
13023 + return mc_send_command(mc_io, &cmd);
13024 +}
13025 +
13026 +/**
13027 + * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry key for hardware
13028 + * @key: ACL key (match values and masks)
13029 + * @entry_cfg_buf: Zeroed 256-byte buffer, to be mapped for DMA afterwards
13030 + *
13031 + * This function has to be called before dpsw_acl_add_entry() or dpsw_acl_remove_entry()
13032 + *
13033 + */
13034 +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
13035 + u8 *entry_cfg_buf)
13036 +{
13037 + struct dpsw_prep_acl_entry *ext_params;
13038 + int i;
13039 +
13040 + ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
13041 +
13042 + for (i = 0; i < 6; i++) {
13043 + ext_params->match_l2_dest_mac[i] =
13044 + key->match.l2_dest_mac[5 - i];
13045 + ext_params->match_l2_source_mac[i] =
13046 + key->match.l2_source_mac[5 - i];
13047 + ext_params->mask_l2_dest_mac[i] =
13048 + key->mask.l2_dest_mac[5 - i];
13049 + ext_params->mask_l2_source_mac[i] =
13050 + key->mask.l2_source_mac[5 - i];
13051 + }
13052 +
13053 + ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
13054 + ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
13055 + ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
13056 + ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
13057 + ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
13058 + ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
13059 + ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
13060 + ext_params->match_l3_dscp = key->match.l3_dscp;
13061 + ext_params->match_l4_source_port =
13062 + cpu_to_le16(key->match.l4_source_port);
13063 +
13064 + ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
13065 + ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
13066 + ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
13067 + ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
13068 + ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
13069 + ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
13070 + ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
13071 + ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
13072 + ext_params->mask_l3_dscp = key->mask.l3_dscp;
13073 + ext_params->match_l3_protocol = key->match.l3_protocol;
13074 + ext_params->mask_l3_protocol = key->mask.l3_protocol;
13075 +}
13076 +
13077 +/**
13078 + * dpsw_acl_add_entry() - Adds an entry to ACL.
13079 + * @mc_io: Pointer to MC portal's I/O object
13080 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13081 + * @token: Token of DPSW object
13082 + * @acl_id: ACL ID
13083 + * @cfg: Entry configuration
13084 + *
13085 + * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
13086 + *
13087 + * Return: '0' on Success; Error code otherwise.
13088 + */
13089 +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
13090 + u32 cmd_flags,
13091 + u16 token,
13092 + u16 acl_id,
13093 + const struct dpsw_acl_entry_cfg *cfg)
13094 +{
13095 + struct mc_command cmd = { 0 };
13096 + struct dpsw_cmd_acl_entry *cmd_params;
13097 +
13098 + /* prepare command */
13099 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
13100 + cmd_flags,
13101 + token);
13102 + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
13103 + cmd_params->acl_id = cpu_to_le16(acl_id);
13104 + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
13105 + cmd_params->precedence = cpu_to_le32(cfg->precedence);
13106 + dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
13107 + cfg->result.action);
13108 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
13109 +
13110 + /* send command to mc*/
13111 + return mc_send_command(mc_io, &cmd);
13112 +}
13113 +
13114 +/**
13115 + * dpsw_acl_remove_entry() - Removes an entry from ACL.
13116 + * @mc_io: Pointer to MC portal's I/O object
13117 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13118 + * @token: Token of DPSW object
13119 + * @acl_id: ACL ID
13120 + * @cfg: Entry configuration
13121 + *
13122 + * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
13123 + *
13124 + * Return: '0' on Success; Error code otherwise.
13125 + */
13126 +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
13127 + u32 cmd_flags,
13128 + u16 token,
13129 + u16 acl_id,
13130 + const struct dpsw_acl_entry_cfg *cfg)
13131 +{
13132 + struct mc_command cmd = { 0 };
13133 + struct dpsw_cmd_acl_entry *cmd_params;
13134 +
13135 + /* prepare command */
13136 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
13137 + cmd_flags,
13138 + token);
13139 + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
13140 + cmd_params->acl_id = cpu_to_le16(acl_id);
13141 + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
13142 + cmd_params->precedence = cpu_to_le32(cfg->precedence);
13143 + dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
13144 + cfg->result.action);
13145 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
13146 +
13147 + /* send command to mc*/
13148 + return mc_send_command(mc_io, &cmd);
13149 +}
13150 +
13151 +/**
13152 + * dpsw_acl_add_if() - Associate one or more interfaces with an ACL.
13153 + * @mc_io: Pointer to MC portal's I/O object
13154 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13155 + * @token: Token of DPSW object
13156 + * @acl_id: ACL ID
13157 + * @cfg: Interfaces list
13158 + *
13159 + * Return: '0' on Success; Error code otherwise.
13160 + */
13161 +int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
13162 + u32 cmd_flags,
13163 + u16 token,
13164 + u16 acl_id,
13165 + const struct dpsw_acl_if_cfg *cfg)
13166 +{
13167 + struct mc_command cmd = { 0 };
13168 + struct dpsw_cmd_acl_if *cmd_params;
13169 +
13170 + /* prepare command */
13171 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
13172 + cmd_flags,
13173 + token);
13174 + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
13175 + cmd_params->acl_id = cpu_to_le16(acl_id);
13176 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13177 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13178 +
13179 + /* send command to mc*/
13180 + return mc_send_command(mc_io, &cmd);
13181 +}
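/*
 * Illustrative sketch, not part of the original patch: the complete ACL
 * workflow - create the ACL, encode a key matching TCP destination port 23,
 * DMA-map the encoded key, install a DROP entry, then bind the ACL to
 * interface 0. It assumes <linux/slab.h> and <linux/dma-mapping.h> are
 * available, 'dev' is the DMA-capable device backing the DPSW, and
 * DPSW_ACL_ACTION_DROP is the enum name defined in dpsw.h.
 */
static int example_drop_telnet(struct fsl_mc_io *mc_io, u16 token,
			       struct device *dev)
{
	struct dpsw_acl_cfg acl_cfg = { .max_entries = 16 };
	struct dpsw_acl_if_cfg if_cfg = { .num_ifs = 1, .if_id = { 0 } };
	struct dpsw_acl_entry_cfg entry = { 0 };
	struct dpsw_acl_key key = { 0 };
	dma_addr_t key_iova;
	u16 acl_id;
	u8 *buf;
	int err;

	err = dpsw_acl_add(mc_io, 0, token, &acl_id, &acl_cfg);
	if (err)
		return err;

	key.match.l3_protocol = 6;		/* TCP */
	key.mask.l3_protocol = 0xff;
	key.match.l4_dest_port = 23;		/* telnet */
	key.mask.l4_dest_port = 0xffff;

	buf = kzalloc(256, GFP_KERNEL);		/* zeroed, as required above */
	if (!buf)
		return -ENOMEM;
	dpsw_acl_prepare_entry_cfg(&key, buf);

	key_iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		kfree(buf);
		return -ENOMEM;
	}

	entry.key_iova = key_iova;
	entry.precedence = 0;
	entry.result.action = DPSW_ACL_ACTION_DROP;

	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry);

	dma_unmap_single(dev, key_iova, 256, DMA_TO_DEVICE);
	kfree(buf);
	if (err)
		return err;

	return dpsw_acl_add_if(mc_io, 0, token, acl_id, &if_cfg);
}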
13182 +
13183 +/**
13184 + * dpsw_acl_remove_if() - Disassociate one or more interfaces from an ACL.
13185 + * @mc_io: Pointer to MC portal's I/O object
13186 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13187 + * @token: Token of DPSW object
13188 + * @acl_id: ACL ID
13189 + * @cfg: Interfaces list
13190 + *
13191 + * Return: '0' on Success; Error code otherwise.
13192 + */
13193 +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
13194 + u32 cmd_flags,
13195 + u16 token,
13196 + u16 acl_id,
13197 + const struct dpsw_acl_if_cfg *cfg)
13198 +{
13199 + struct mc_command cmd = { 0 };
13200 + struct dpsw_cmd_acl_if *cmd_params;
13201 +
13202 + /* prepare command */
13203 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
13204 + cmd_flags,
13205 + token);
13206 + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
13207 + cmd_params->acl_id = cpu_to_le16(acl_id);
13208 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13209 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13210 +
13211 + /* send command to mc*/
13212 + return mc_send_command(mc_io, &cmd);
13213 +}
13214 +
13215 +/**
13216 + * dpsw_acl_get_attributes() - Get ACL attributes
13217 + * @mc_io: Pointer to MC portal's I/O object
13218 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13219 + * @token: Token of DPSW object
13220 + * @acl_id: ACL Identifier
13221 + * @attr: Returned ACL attributes
13222 + *
13223 + * Return: '0' on Success; Error code otherwise.
13224 + */
13225 +int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
13226 + u32 cmd_flags,
13227 + u16 token,
13228 + u16 acl_id,
13229 + struct dpsw_acl_attr *attr)
13230 +{
13231 + struct mc_command cmd = { 0 };
13232 + struct dpsw_cmd_acl_get_attr *cmd_params;
13233 + struct dpsw_rsp_acl_get_attr *rsp_params;
13234 + int err;
13235 +
13236 + /* prepare command */
13237 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
13238 + cmd_flags,
13239 + token);
13240 + cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
13241 + cmd_params->acl_id = cpu_to_le16(acl_id);
13242 +
13243 + /* send command to mc*/
13244 + err = mc_send_command(mc_io, &cmd);
13245 + if (err)
13246 + return err;
13247 +
13248 + /* retrieve response parameters */
13249 + rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
13250 + attr->max_entries = le16_to_cpu(rsp_params->max_entries);
13251 + attr->num_entries = le16_to_cpu(rsp_params->num_entries);
13252 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13253 +
13254 + return 0;
13255 +}
13256 +
13257 +/**
13258 + * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
13259 + * @mc_io: Pointer to MC portal's I/O object
13260 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13261 + * @token: Token of DPSW object
13262 + * @attr: Returned control interface attributes
13263 + *
13264 + * Return: '0' on Success; Error code otherwise.
13265 + */
13266 +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
13267 + u32 cmd_flags,
13268 + u16 token,
13269 + struct dpsw_ctrl_if_attr *attr)
13270 +{
13271 + struct mc_command cmd = { 0 };
13272 + struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
13273 + int err;
13274 +
13275 + /* prepare command */
13276 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
13277 + cmd_flags,
13278 + token);
13279 +
13280 + /* send command to mc*/
13281 + err = mc_send_command(mc_io, &cmd);
13282 + if (err)
13283 + return err;
13284 +
13285 + /* retrieve response parameters */
13286 + rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
13287 + attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
13288 + attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
13289 + attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
13290 +
13291 + return 0;
13292 +}
13293 +
13294 +/**
13295 + * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
13296 + * @mc_io: Pointer to MC portal's I/O object
13297 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13298 + * @token: Token of DPSW object
13299 + * @cfg: Buffer pools configuration
13300 + *
13301 + * Return: '0' on Success; Error code otherwise.
13302 + */
13303 +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
13304 + u32 cmd_flags,
13305 + u16 token,
13306 + const struct dpsw_ctrl_if_pools_cfg *pools)
13307 +{
13308 + struct mc_command cmd = { 0 };
13309 + struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
13310 + int i;
13311 +
13312 + /* prepare command */
13313 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
13314 + cmd_flags,
13315 + token);
13316 + cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
13317 + cmd_params->num_dpbp = pools->num_dpbp;
13318 + for (i = 0; i < 8; i++) {
13319 + cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
13320 + i,
13321 + pools->pools[i].backup_pool);
13322 + cmd_params->buffer_size[i] =
13323 + cpu_to_le16(pools->pools[i].buffer_size);
13324 + cmd_params->dpbp_id[i] =
13325 + cpu_to_le32(pools->pools[i].dpbp_id);
13326 + }
13327 +
13328 + /* send command to mc*/
13329 + return mc_send_command(mc_io, &cmd);
13330 +}
13331 +
13332 +/**
13333 + * dpsw_ctrl_if_enable() - Enable control interface
13334 + * @mc_io: Pointer to MC portal's I/O object
13335 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13336 + * @token: Token of DPSW object
13337 + *
13338 + * Return: '0' on Success; Error code otherwise.
13339 + */
13340 +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
13341 + u32 cmd_flags,
13342 + u16 token)
13343 +{
13344 + struct mc_command cmd = { 0 };
13345 +
13346 + /* prepare command */
13347 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
13348 + cmd_flags,
13349 + token);
13350 +
13351 + /* send command to mc*/
13352 + return mc_send_command(mc_io, &cmd);
13353 +}
13354 +
13355 +/**
13356 + * dpsw_ctrl_if_disable() - Function disables control interface
13357 + * @mc_io: Pointer to MC portal's I/O object
13358 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13359 + * @token: Token of DPSW object
13360 + *
13361 + * Return: '0' on Success; Error code otherwise.
13362 + */
13363 +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
13364 + u32 cmd_flags,
13365 + u16 token)
13366 +{
13367 + struct mc_command cmd = { 0 };
13368 +
13369 + /* prepare command */
13370 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
13371 + cmd_flags,
13372 + token);
13373 +
13374 + /* send command to mc*/
13375 + return mc_send_command(mc_io, &cmd);
13376 +}
13377 +
13378 +/**
13379 + * dpsw_get_api_version() - Get Data Path Switch API version
13380 + * @mc_io: Pointer to MC portal's I/O object
13381 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13382 + * @major_ver: Major version of data path switch API
13383 + * @minor_ver: Minor version of data path switch API
13384 + *
13385 + * Return: '0' on Success; Error code otherwise.
13386 + */
13387 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
13388 + u32 cmd_flags,
13389 + u16 *major_ver,
13390 + u16 *minor_ver)
13391 +{
13392 + struct mc_command cmd = { 0 };
13393 + struct dpsw_rsp_get_api_version *rsp_params;
13394 + int err;
13395 +
13396 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
13397 + cmd_flags,
13398 + 0);
13399 +
13400 + err = mc_send_command(mc_io, &cmd);
13401 + if (err)
13402 + return err;
13403 +
13404 + rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
13405 + *major_ver = le16_to_cpu(rsp_params->version_major);
13406 + *minor_ver = le16_to_cpu(rsp_params->version_minor);
13407 +
13408 + return 0;
13409 +}
13410 diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
13411 new file mode 100644
13412 index 00000000..c91abeb4
13413 --- /dev/null
13414 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
13415 @@ -0,0 +1,1269 @@
13416 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
13417 + *
13418 + * Redistribution and use in source and binary forms, with or without
13419 + * modification, are permitted provided that the following conditions are met:
13420 + * * Redistributions of source code must retain the above copyright
13421 + * notice, this list of conditions and the following disclaimer.
13422 + * * Redistributions in binary form must reproduce the above copyright
13423 + * notice, this list of conditions and the following disclaimer in the
13424 + * documentation and/or other materials provided with the distribution.
13425 + * * Neither the name of the above-listed copyright holders nor the
13426 + * names of any contributors may be used to endorse or promote products
13427 + * derived from this software without specific prior written permission.
13428 + *
13429 + *
13430 + * ALTERNATIVELY, this software may be distributed under the terms of the
13431 + * GNU General Public License ("GPL") as published by the Free Software
13432 + * Foundation, either version 2 of that License or (at your option) any
13433 + * later version.
13434 + *
13435 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13436 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13437 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13438 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13439 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13440 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13441 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13442 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13443 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13444 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13445 + * POSSIBILITY OF SUCH DAMAGE.
13446 + */
13447 +#ifndef __FSL_DPSW_H
13448 +#define __FSL_DPSW_H
13449 +
13450 +/* Data Path L2-Switch API
13451 + * Contains API for handling DPSW topology and functionality
13452 + */
13453 +
13454 +struct fsl_mc_io;
13455 +
13456 +/**
13457 + * DPSW general definitions
13458 + */
13459 +
13460 +/**
13461 + * Maximum number of traffic class priorities
13462 + */
13463 +#define DPSW_MAX_PRIORITIES 8
13464 +/**
13465 + * Maximum number of interfaces
13466 + */
13467 +#define DPSW_MAX_IF 64
13468 +
13469 +int dpsw_open(struct fsl_mc_io *mc_io,
13470 + u32 cmd_flags,
13471 + int dpsw_id,
13472 + u16 *token);
13473 +
13474 +int dpsw_close(struct fsl_mc_io *mc_io,
13475 + u32 cmd_flags,
13476 + u16 token);
13477 +
13478 +/**
13479 + * DPSW options
13480 + */
13481 +
13482 +/**
13483 + * Disable flooding
13484 + */
13485 +#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
13486 +/**
13487 + * Disable Multicast
13488 + */
13489 +#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
13490 +/**
13491 + * Support control interface
13492 + */
13493 +#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
13494 +/**
13495 + * Disable flooding metering
13496 + */
13497 +#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
13498 +/**
13499 + * Enable metering
13500 + */
13501 +#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
13502 +
13503 +/**
13504 + * enum dpsw_component_type - component type of a bridge
13505 + * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
13506 + * enterprise VLAN bridge or of a Provider Bridge used
13507 + * to process C-tagged frames
13508 + * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
13509 + * Provider Bridge
13510 + *
13511 + */
13512 +enum dpsw_component_type {
13513 + DPSW_COMPONENT_TYPE_C_VLAN = 0,
13514 + DPSW_COMPONENT_TYPE_S_VLAN
13515 +};
13516 +
13517 +/**
13518 + * struct dpsw_cfg - DPSW configuration
13519 + * @num_ifs: Number of external and internal interfaces
13520 + * @adv: Advanced parameters; default is all zeros;
13521 + * use this structure to change default settings
13522 + */
13523 +struct dpsw_cfg {
13524 + u16 num_ifs;
13525 + /**
13526 + * struct adv - Advanced parameters
13527 + * @options: Enable/Disable DPSW features (bitmap)
13528 + * @max_vlans: Maximum number of VLANs; 0 - indicates default 16
13529 + * @max_meters_per_if: Number of meters per interface
13530 + * @max_fdbs: Maximum number of FDBs; 0 - indicates default 16
13531 + * @max_fdb_entries: Number of FDB entries for default FDB table;
13532 + * 0 - indicates default 1024 entries.
13533 + * @fdb_aging_time: Default FDB aging time for default FDB table;
13534 + * 0 - indicates default 300 seconds
13535 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
13536 + * 0 - indicates default 32
13537 + * @component_type: Indicates the component type of this bridge
13538 + */
13539 + struct {
13540 + u64 options;
13541 + u16 max_vlans;
13542 + u8 max_meters_per_if;
13543 + u8 max_fdbs;
13544 + u16 max_fdb_entries;
13545 + u16 fdb_aging_time;
13546 + u16 max_fdb_mc_groups;
13547 + enum dpsw_component_type component_type;
13548 + } adv;
13549 +};
13550 +
13551 +int dpsw_create(struct fsl_mc_io *mc_io,
13552 + u16 dprc_token,
13553 + u32 cmd_flags,
13554 + const struct dpsw_cfg *cfg,
13555 + u32 *obj_id);
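
/*
 * Usage sketch: creating a DPSW object with the advanced parameters left at
 * their defaults except for the interface count. It assumes the caller already
 * holds an MC portal ('mc_io') and the parent container token ('dprc_token');
 * the wrapper name and the values are placeholders for this example.
 */
static int example_dpsw_create(struct fsl_mc_io *mc_io, u16 dprc_token,
			       u32 *obj_id)
{
	struct dpsw_cfg cfg = { 0 };

	cfg.num_ifs = 4;		/* four switch ports */
	cfg.adv.options = 0;		/* keep optional features enabled */
	cfg.adv.max_vlans = 0;		/* 0 selects the default of 16 */
	cfg.adv.component_type = DPSW_COMPONENT_TYPE_C_VLAN;

	return dpsw_create(mc_io, dprc_token, 0, &cfg, obj_id);
}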
13556 +
13557 +int dpsw_destroy(struct fsl_mc_io *mc_io,
13558 + u16 dprc_token,
13559 + u32 cmd_flags,
13560 + u32 object_id);
13561 +
13562 +int dpsw_enable(struct fsl_mc_io *mc_io,
13563 + u32 cmd_flags,
13564 + u16 token);
13565 +
13566 +int dpsw_disable(struct fsl_mc_io *mc_io,
13567 + u32 cmd_flags,
13568 + u16 token);
13569 +
13570 +int dpsw_is_enabled(struct fsl_mc_io *mc_io,
13571 + u32 cmd_flags,
13572 + u16 token,
13573 + int *en);
13574 +
13575 +int dpsw_reset(struct fsl_mc_io *mc_io,
13576 + u32 cmd_flags,
13577 + u16 token);
13578 +
13579 +/**
13580 + * DPSW IRQ Index and Events
13581 + */
13582 +
13583 +#define DPSW_IRQ_INDEX_IF 0x0000
13584 +#define DPSW_IRQ_INDEX_L2SW 0x0001
13585 +
13586 +/**
13587 + * IRQ event - Indicates that the link state changed
13588 + */
13589 +#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
13590 +
13591 +/**
13592 + * struct dpsw_irq_cfg - IRQ configuration
13593 + * @addr: Address that must be written to signal a message-based interrupt
13594 + * @val: Value to write into irq_addr address
13595 + * @irq_num: A user defined number associated with this IRQ
13596 + */
13597 +struct dpsw_irq_cfg {
13598 + u64 addr;
13599 + u32 val;
13600 + int irq_num;
13601 +};
13602 +
13603 +int dpsw_set_irq(struct fsl_mc_io *mc_io,
13604 + u32 cmd_flags,
13605 + u16 token,
13606 + u8 irq_index,
13607 + struct dpsw_irq_cfg *irq_cfg);
13608 +
13609 +int dpsw_get_irq(struct fsl_mc_io *mc_io,
13610 + u32 cmd_flags,
13611 + u16 token,
13612 + u8 irq_index,
13613 + int *type,
13614 + struct dpsw_irq_cfg *irq_cfg);
13615 +
13616 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
13617 + u32 cmd_flags,
13618 + u16 token,
13619 + u8 irq_index,
13620 + u8 en);
13621 +
13622 +int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
13623 + u32 cmd_flags,
13624 + u16 token,
13625 + u8 irq_index,
13626 + u8 *en);
13627 +
13628 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
13629 + u32 cmd_flags,
13630 + u16 token,
13631 + u8 irq_index,
13632 + u32 mask);
13633 +
13634 +int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
13635 + u32 cmd_flags,
13636 + u16 token,
13637 + u8 irq_index,
13638 + u32 *mask);
13639 +
13640 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
13641 + u32 cmd_flags,
13642 + u16 token,
13643 + u8 irq_index,
13644 + u32 *status);
13645 +
13646 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
13647 + u32 cmd_flags,
13648 + u16 token,
13649 + u8 irq_index,
13650 + u32 status);
13651 +
13652 +/**
13653 + * struct dpsw_attr - Structure representing DPSW attributes
13654 + * @id: DPSW object ID
13655 + * @options: Enable/Disable DPSW features
13656 + * @max_vlans: Maximum Number of VLANs
13657 + * @max_meters_per_if: Number of meters per interface
13658 + * @max_fdbs: Maximum Number of FDBs
13659 + * @max_fdb_entries: Number of FDB entries for default FDB table;
13660 + * 0 - indicates default 1024 entries.
13661 + * @fdb_aging_time: Default FDB aging time for default FDB table;
13662 + * 0 - indicates default 300 seconds
13663 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
13664 + * 0 - indicates default 32
13665 + * @mem_size: DPSW frame storage memory size
13666 + * @num_ifs: Number of interfaces
13667 + * @num_vlans: Current number of VLANs
13668 + * @num_fdbs: Current number of FDBs
13669 + * @component_type: Component type of this bridge
13670 + */
13671 +struct dpsw_attr {
13672 + int id;
13673 + u64 options;
13674 + u16 max_vlans;
13675 + u8 max_meters_per_if;
13676 + u8 max_fdbs;
13677 + u16 max_fdb_entries;
13678 + u16 fdb_aging_time;
13679 + u16 max_fdb_mc_groups;
13680 + u16 num_ifs;
13681 + u16 mem_size;
13682 + u16 num_vlans;
13683 + u8 num_fdbs;
13684 + enum dpsw_component_type component_type;
13685 +};
13686 +
13687 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
13688 + u32 cmd_flags,
13689 + u16 token,
13690 + struct dpsw_attr *attr);
13691 +
13692 +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
13693 + u32 cmd_flags,
13694 + u16 token,
13695 + u16 if_id);
13696 +
13697 +/**
13698 + * enum dpsw_action - Action selection for special/control frames
13699 + * @DPSW_ACTION_DROP: Drop frame
13700 + * @DPSW_ACTION_REDIRECT: Redirect frame to control port
13701 + */
13702 +enum dpsw_action {
13703 + DPSW_ACTION_DROP = 0,
13704 + DPSW_ACTION_REDIRECT = 1
13705 +};
13706 +
13707 +/**
13708 + * Enable auto-negotiation
13709 + */
13710 +#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
13711 +/**
13712 + * Enable half-duplex mode
13713 + */
13714 +#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
13715 +/**
13716 + * Enable pause frames
13717 + */
13718 +#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
13719 +/**
13720 + * Enable asymmetric pause frames
13721 + */
13722 +#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
13723 +
13724 +/**
13725 + * struct dpsw_link_cfg - Structure representing DPSW link configuration
13726 + * @rate: Rate
13727 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
13728 + */
13729 +struct dpsw_link_cfg {
13730 + u32 rate;
13731 + u64 options;
13732 +};
13733 +
13734 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
13735 + u32 cmd_flags,
13736 + u16 token,
13737 + u16 if_id,
13738 + struct dpsw_link_cfg *cfg);
13739 +/**
13740 + * struct dpsw_link_state - Structure representing DPSW link state
13741 + * @rate: Rate
13742 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
13743 + * @up: 0 - covers two cases: down and disconnected, 1 - up
13744 + */
13745 +struct dpsw_link_state {
13746 + u32 rate;
13747 + u64 options;
13748 + int up;
13749 +};
13750 +
13751 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
13752 + u32 cmd_flags,
13753 + u16 token,
13754 + u16 if_id,
13755 + struct dpsw_link_state *state);
13756 +
13757 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
13758 + u32 cmd_flags,
13759 + u16 token,
13760 + u16 if_id,
13761 + int en);
13762 +
13763 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
13764 + u32 cmd_flags,
13765 + u16 token,
13766 + u16 if_id,
13767 + int en);
13768 +
13769 +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
13770 + u32 cmd_flags,
13771 + u16 token,
13772 + u16 if_id,
13773 + int en);
13774 +
13775 +/**
13776 + * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
13777 + * @pcp: Priority Code Point (PCP): a 3-bit field which refers
13778 + * to the IEEE 802.1p priority
13779 + * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
13780 + * separately or in conjunction with PCP to indicate frames
13781 + * eligible to be dropped in the presence of congestion
13782 + * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
13783 + * to which the frame belongs. The hexadecimal values
13784 + * of 0x000 and 0xFFF are reserved;
13785 + * all other values may be used as VLAN identifiers,
13786 + * allowing up to 4,094 VLANs
13787 + */
13788 +struct dpsw_tci_cfg {
13789 + u8 pcp;
13790 + u8 dei;
13791 + u16 vlan_id;
13792 +};
13793 +
13794 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
13795 + u32 cmd_flags,
13796 + u16 token,
13797 + u16 if_id,
13798 + const struct dpsw_tci_cfg *cfg);
13799 +
13800 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
13801 + u32 cmd_flags,
13802 + u16 token,
13803 + u16 if_id,
13804 + struct dpsw_tci_cfg *cfg);
13805 +
13806 +/**
13807 + * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
13808 + * @DPSW_STP_STATE_BLOCKING: Blocking state
13809 + * @DPSW_STP_STATE_LISTENING: Listening state
13810 + * @DPSW_STP_STATE_LEARNING: Learning state
13811 + * @DPSW_STP_STATE_FORWARDING: Forwarding state
13812 + *
13813 + */
13814 +enum dpsw_stp_state {
13815 + DPSW_STP_STATE_BLOCKING = 0,
13816 + DPSW_STP_STATE_LISTENING = 1,
13817 + DPSW_STP_STATE_LEARNING = 2,
13818 + DPSW_STP_STATE_FORWARDING = 3
13819 +};
13820 +
13821 +/**
13822 + * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
13823 + * @vlan_id: VLAN ID STP state
13824 + * @state: STP state
13825 + */
13826 +struct dpsw_stp_cfg {
13827 + u16 vlan_id;
13828 + enum dpsw_stp_state state;
13829 +};
13830 +
13831 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
13832 + u32 cmd_flags,
13833 + u16 token,
13834 + u16 if_id,
13835 + const struct dpsw_stp_cfg *cfg);
13836 +
13837 +/**
13838 + * enum dpsw_accepted_frames - Types of frames to accept
13839 + * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
13840 + * priority tagged frames
13841 + * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
13842 + * Priority-Tagged frames received on this interface.
13843 + *
13844 + */
13845 +enum dpsw_accepted_frames {
13846 + DPSW_ADMIT_ALL = 1,
13847 + DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
13848 +};
13849 +
13850 +/**
13851 + * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
13852 + * @type: Defines ingress accepted frames
13853 + * @unaccept_act: When a frame is not accepted, it may be discarded or
13854 + * redirected to control interface depending on this mode
13855 + */
13856 +struct dpsw_accepted_frames_cfg {
13857 + enum dpsw_accepted_frames type;
13858 + enum dpsw_action unaccept_act;
13859 +};
13860 +
13861 +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
13862 + u32 cmd_flags,
13863 + u16 token,
13864 + u16 if_id,
13865 + const struct dpsw_accepted_frames_cfg *cfg);
13866 +
13867 +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
13868 + u32 cmd_flags,
13869 + u16 token,
13870 + u16 if_id,
13871 + int accept_all);
13872 +
13873 +/**
13874 + * enum dpsw_counter - Counters types
13875 + * @DPSW_CNT_ING_FRAME: Counts ingress frames
13876 + * @DPSW_CNT_ING_BYTE: Counts ingress bytes
13877 + * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
13878 + * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
13879 + * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
13880 + * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
13881 + * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
13882 + * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
13883 + * @DPSW_CNT_EGR_FRAME: Counts egress frames
13884 + * @DPSW_CNT_EGR_BYTE: Counts egress bytes
13885 + * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
13886 + * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
13887 + */
13888 +enum dpsw_counter {
13889 + DPSW_CNT_ING_FRAME = 0x0,
13890 + DPSW_CNT_ING_BYTE = 0x1,
13891 + DPSW_CNT_ING_FLTR_FRAME = 0x2,
13892 + DPSW_CNT_ING_FRAME_DISCARD = 0x3,
13893 + DPSW_CNT_ING_MCAST_FRAME = 0x4,
13894 + DPSW_CNT_ING_MCAST_BYTE = 0x5,
13895 + DPSW_CNT_ING_BCAST_FRAME = 0x6,
13896 + DPSW_CNT_ING_BCAST_BYTES = 0x7,
13897 + DPSW_CNT_EGR_FRAME = 0x8,
13898 + DPSW_CNT_EGR_BYTE = 0x9,
13899 + DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
13900 + DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
13901 +};
13902 +
13903 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
13904 + u32 cmd_flags,
13905 + u16 token,
13906 + u16 if_id,
13907 + enum dpsw_counter type,
13908 + u64 *counter);
13909 +
13910 +int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
13911 + u32 cmd_flags,
13912 + u16 token,
13913 + u16 if_id,
13914 + enum dpsw_counter type,
13915 + u64 counter);
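
/*
 * Usage sketch: read the ingress frame counter of one interface. Assumes an
 * open DPSW ('token'); the wrapper name is a placeholder for this example.
 */
static int example_read_ing_frames(struct fsl_mc_io *mc_io, u16 token,
				   u16 if_id, u64 *frames)
{
	return dpsw_if_get_counter(mc_io, 0, token, if_id,
				   DPSW_CNT_ING_FRAME, frames);
}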
13916 +
13917 +/**
13918 + * Maximum number of TC
13919 + */
13920 +#define DPSW_MAX_TC 8
13921 +
13922 +/**
13923 + * enum dpsw_priority_selector - User priority
13924 + * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
13925 + * refers to the IEEE 802.1p priority.
13926 + * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
13927 + * field from IP header
13928 + *
13929 + */
13930 +enum dpsw_priority_selector {
13931 + DPSW_UP_PCP = 0,
13932 + DPSW_UP_DSCP = 1
13933 +};
13934 +
13935 +/**
13936 + * enum dpsw_schedule_mode - Traffic classes scheduling
13937 + * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
13938 + * @DPSW_SCHED_WEIGHTED: weight-based scheduling using a token bucket algorithm
13939 + */
13940 +enum dpsw_schedule_mode {
13941 + DPSW_SCHED_STRICT_PRIORITY,
13942 + DPSW_SCHED_WEIGHTED
13943 +};
13944 +
13945 +/**
13946 + * struct dpsw_tx_schedule_cfg - traffic class configuration
13947 + * @mode: Strict or weight-based scheduling
13948 + * @delta_bandwidth: Weighted bandwidth, in the range 100 to 10000
13949 + */
13950 +struct dpsw_tx_schedule_cfg {
13951 + enum dpsw_schedule_mode mode;
13952 + u16 delta_bandwidth;
13953 +};
13954 +
13955 +/**
13956 + * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
13957 + * class configuration
13958 + * @priority_selector: Source for user priority regeneration
13959 + * @tc_id: The regenerated user priority that each incoming
13960 + *	user priority is mapped to for this interface
13961 + * @tc_sched: Traffic classes configuration
13962 + */
13963 +struct dpsw_tx_selection_cfg {
13964 + enum dpsw_priority_selector priority_selector;
13965 + u8 tc_id[DPSW_MAX_PRIORITIES];
13966 + struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
13967 +};
13968 +
13969 +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
13970 + u32 cmd_flags,
13971 + u16 token,
13972 + u16 if_id,
13973 + const struct dpsw_tx_selection_cfg *cfg);
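
/*
 * Usage sketch: map the eight PCP user priorities one-to-one onto traffic
 * classes and schedule all of them with strict priority. The mapping and the
 * wrapper name are example choices, not a recommended configuration.
 */
static int example_tx_selection(struct fsl_mc_io *mc_io, u16 token, u16 if_id)
{
	struct dpsw_tx_selection_cfg cfg = { 0 };
	int i;

	cfg.priority_selector = DPSW_UP_PCP;
	for (i = 0; i < DPSW_MAX_PRIORITIES; i++)
		cfg.tc_id[i] = i;
	for (i = 0; i < DPSW_MAX_TC; i++)
		cfg.tc_sched[i].mode = DPSW_SCHED_STRICT_PRIORITY;

	return dpsw_if_set_tx_selection(mc_io, 0, token, if_id, &cfg);
}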
13974 +
13975 +/**
13976 + * enum dpsw_reflection_filter - Filter type for frames to reflect
13977 + * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
13978 + * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belonging to
13979 + *	the particular VLAN defined by the vid parameter
13980 + *
13981 + */
13982 +enum dpsw_reflection_filter {
13983 + DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
13984 + DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
13985 +};
13986 +
13987 +/**
13988 + * struct dpsw_reflection_cfg - Structure representing reflection information
13989 + * @filter: Filter type for frames to reflect
13990 + * @vlan_id: VLAN ID to reflect; valid only when the filter type is
13991 + *	DPSW_REFLECTION_FILTER_INGRESS_VLAN
13992 + */
13993 +struct dpsw_reflection_cfg {
13994 + enum dpsw_reflection_filter filter;
13995 + u16 vlan_id;
13996 +};
13997 +
13998 +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
13999 + u32 cmd_flags,
14000 + u16 token,
14001 + u16 if_id,
14002 + const struct dpsw_reflection_cfg *cfg);
14003 +
14004 +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
14005 + u32 cmd_flags,
14006 + u16 token,
14007 + u16 if_id,
14008 + const struct dpsw_reflection_cfg *cfg);
14009 +
14010 +/**
14011 + * enum dpsw_metering_mode - Metering modes
14012 + * @DPSW_METERING_MODE_NONE: metering disabled
14013 + * @DPSW_METERING_MODE_RFC2698: RFC 2698
14014 + * @DPSW_METERING_MODE_RFC4115: RFC 4115
14015 + */
14016 +enum dpsw_metering_mode {
14017 + DPSW_METERING_MODE_NONE = 0,
14018 + DPSW_METERING_MODE_RFC2698,
14019 + DPSW_METERING_MODE_RFC4115
14020 +};
14021 +
14022 +/**
14023 + * enum dpsw_metering_unit - Metering count
14024 + * @DPSW_METERING_UNIT_BYTES: count bytes
14025 + * @DPSW_METERING_UNIT_FRAMES: count frames
14026 + */
14027 +enum dpsw_metering_unit {
14028 + DPSW_METERING_UNIT_BYTES = 0,
14029 + DPSW_METERING_UNIT_FRAMES
14030 +};
14031 +
14032 +/**
14033 + * struct dpsw_metering_cfg - Metering configuration
14034 + * @mode: metering modes
14035 + * @units: Bytes or frame units
14036 + * @cir: Committed information rate (CIR) in Kbits/s
14037 + * @eir: Peak information rate (PIR) in Kbit/s for rfc2698
14038 + *	 Excess information rate (EIR) in Kbit/s for rfc4115
14039 + * @cbs: Committed burst size (CBS) in bytes
14040 + * @ebs: Peak burst size (PBS) in bytes for rfc2698
14041 + *	 Excess burst size (EBS) in bytes for rfc4115
14042 + *
14043 + */
14044 +struct dpsw_metering_cfg {
14045 + enum dpsw_metering_mode mode;
14046 + enum dpsw_metering_unit units;
14047 + u32 cir;
14048 + u32 eir;
14049 + u32 cbs;
14050 + u32 ebs;
14051 +};
14052 +
14053 +int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
14054 + u32 cmd_flags,
14055 + u16 token,
14056 + u16 if_id,
14057 + const struct dpsw_metering_cfg *cfg);
14058 +
14059 +int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
14060 + u32 cmd_flags,
14061 + u16 token,
14062 + u16 if_id,
14063 + u8 tc_id,
14064 + const struct dpsw_metering_cfg *cfg);
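
/*
 * Usage sketch: rate-limit flooded traffic on one interface with RFC 2698
 * byte metering. The 10 Mbit/s rate and 64 KiB bursts are example values only.
 */
static int example_limit_flooding(struct fsl_mc_io *mc_io, u16 token, u16 if_id)
{
	struct dpsw_metering_cfg cfg = {
		.mode = DPSW_METERING_MODE_RFC2698,
		.units = DPSW_METERING_UNIT_BYTES,
		.cir = 10000,		/* Kbit/s */
		.eir = 10000,		/* Kbit/s */
		.cbs = 64 * 1024,	/* bytes */
		.ebs = 64 * 1024,	/* bytes */
	};

	return dpsw_if_set_flooding_metering(mc_io, 0, token, if_id, &cfg);
}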
14065 +
14066 +/**
14067 + * enum dpsw_early_drop_unit - DPSW early drop unit
14068 + * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
14069 + * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
14070 + */
14071 +enum dpsw_early_drop_unit {
14072 + DPSW_EARLY_DROP_UNIT_BYTE = 0,
14073 + DPSW_EARLY_DROP_UNIT_FRAMES
14074 +};
14075 +
14076 +/**
14077 + * enum dpsw_early_drop_mode - DPSW early drop mode
14078 + * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
14079 + * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
14080 + * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
14081 + */
14082 +enum dpsw_early_drop_mode {
14083 + DPSW_EARLY_DROP_MODE_NONE = 0,
14084 + DPSW_EARLY_DROP_MODE_TAIL,
14085 + DPSW_EARLY_DROP_MODE_WRED
14086 +};
14087 +
14088 +/**
14089 + * struct dpsw_wred_cfg - WRED configuration
14090 + * @max_threshold: maximum threshold at which packets may be discarded. Above
14091 + *		this threshold all packets are discarded; must be less than 2^39;
14092 + *		approximated to be expressed as (x+256)*2^(y-1) due to HW
14093 + *		implementation.
14094 + * @min_threshold: minimum threshold at which packets may be discarded
14095 + * @drop_probability: probability that a packet will be discarded (1-100,
14096 + * associated with the maximum threshold)
14097 + */
14098 +struct dpsw_wred_cfg {
14099 + u64 min_threshold;
14100 + u64 max_threshold;
14101 + u8 drop_probability;
14102 +};
14103 +
14104 +/**
14105 + * struct dpsw_early_drop_cfg - early-drop configuration
14106 + * @drop_mode: drop mode
14107 + * @units: count units
14108 + * @yellow: WRED - 'yellow' configuration
14109 + * @green: WRED - 'green' configuration
14110 + * @tail_drop_threshold: tail drop threshold
14111 + */
14112 +struct dpsw_early_drop_cfg {
14113 + enum dpsw_early_drop_mode drop_mode;
14114 + enum dpsw_early_drop_unit units;
14115 + struct dpsw_wred_cfg yellow;
14116 + struct dpsw_wred_cfg green;
14117 + u32 tail_drop_threshold;
14118 +};
14119 +
14120 +void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
14121 + u8 *early_drop_buf);
14122 +
14123 +int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
14124 + u32 cmd_flags,
14125 + u16 token,
14126 + u16 if_id,
14127 + u8 tc_id,
14128 + u64 early_drop_iova);
14129 +
14130 +/**
14131 + * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
14132 + * @tpid: An additional tag protocol identifier
14133 + */
14134 +struct dpsw_custom_tpid_cfg {
14135 + u16 tpid;
14136 +};
14137 +
14138 +int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
14139 + u32 cmd_flags,
14140 + u16 token,
14141 + const struct dpsw_custom_tpid_cfg *cfg);
14142 +
14143 +int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
14144 + u32 cmd_flags,
14145 + u16 token,
14146 + const struct dpsw_custom_tpid_cfg *cfg);
14147 +
14148 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
14149 + u32 cmd_flags,
14150 + u16 token,
14151 + u16 if_id);
14152 +
14153 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
14154 + u32 cmd_flags,
14155 + u16 token,
14156 + u16 if_id);
14157 +
14158 +/**
14159 + * struct dpsw_if_attr - Structure representing DPSW interface attributes
14160 + * @num_tcs: Number of traffic classes
14161 + * @rate: Transmit rate in bits per second
14162 + * @options: Interface configuration options (bitmap)
14163 + * @enabled: Indicates if interface is enabled
14164 + * @accept_all_vlan: The device discards/accepts incoming frames
14165 + * for VLANs that do not include this interface
14166 + * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
14167 + * discards untagged frames or priority-tagged frames received on
14168 + * this interface;
14169 + * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
14170 + * tagged frames received on this interface are accepted
14171 + * @qdid: control frames transmit qdid
14172 + */
14173 +struct dpsw_if_attr {
14174 + u8 num_tcs;
14175 + u32 rate;
14176 + u32 options;
14177 + int enabled;
14178 + int accept_all_vlan;
14179 + enum dpsw_accepted_frames admit_untagged;
14180 + u16 qdid;
14181 +};
14182 +
14183 +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
14184 + u32 cmd_flags,
14185 + u16 token,
14186 + u16 if_id,
14187 + struct dpsw_if_attr *attr);
14188 +
14189 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
14190 + u32 cmd_flags,
14191 + u16 token,
14192 + u16 if_id,
14193 + u16 frame_length);
14194 +
14195 +int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
14196 + u32 cmd_flags,
14197 + u16 token,
14198 + u16 if_id,
14199 + u16 *frame_length);
14200 +
14201 +/**
14202 + * struct dpsw_vlan_cfg - VLAN Configuration
14203 + * @fdb_id: Forwarding Data Base
14204 + */
14205 +struct dpsw_vlan_cfg {
14206 + u16 fdb_id;
14207 +};
14208 +
14209 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
14210 + u32 cmd_flags,
14211 + u16 token,
14212 + u16 vlan_id,
14213 + const struct dpsw_vlan_cfg *cfg);
14214 +
14215 +/**
14216 + * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
14217 + * @num_ifs: The number of interfaces that are assigned to the egress
14218 + * list for this VLAN
14219 + * @if_id: The set of interfaces that are
14220 + * assigned to the egress list for this VLAN
14221 + */
14222 +struct dpsw_vlan_if_cfg {
14223 + u16 num_ifs;
14224 + u16 if_id[DPSW_MAX_IF];
14225 +};
14226 +
14227 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
14228 + u32 cmd_flags,
14229 + u16 token,
14230 + u16 vlan_id,
14231 + const struct dpsw_vlan_if_cfg *cfg);
14232 +
14233 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
14234 + u32 cmd_flags,
14235 + u16 token,
14236 + u16 vlan_id,
14237 + const struct dpsw_vlan_if_cfg *cfg);
14238 +
14239 +int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
14240 + u32 cmd_flags,
14241 + u16 token,
14242 + u16 vlan_id,
14243 + const struct dpsw_vlan_if_cfg *cfg);
14244 +
14245 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
14246 + u32 cmd_flags,
14247 + u16 token,
14248 + u16 vlan_id,
14249 + const struct dpsw_vlan_if_cfg *cfg);
14250 +
14251 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
14252 + u32 cmd_flags,
14253 + u16 token,
14254 + u16 vlan_id,
14255 + const struct dpsw_vlan_if_cfg *cfg);
14256 +
14257 +int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
14258 + u32 cmd_flags,
14259 + u16 token,
14260 + u16 vlan_id,
14261 + const struct dpsw_vlan_if_cfg *cfg);
14262 +
14263 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
14264 + u32 cmd_flags,
14265 + u16 token,
14266 + u16 vlan_id);
14267 +
14268 +/**
14269 + * struct dpsw_vlan_attr - VLAN attributes
14270 + * @fdb_id: Associated FDB ID
14271 + * @num_ifs: Number of interfaces
14272 + * @num_untagged_ifs: Number of untagged interfaces
14273 + * @num_flooding_ifs: Number of flooding interfaces
14274 + */
14275 +struct dpsw_vlan_attr {
14276 + u16 fdb_id;
14277 + u16 num_ifs;
14278 + u16 num_untagged_ifs;
14279 + u16 num_flooding_ifs;
14280 +};
14281 +
14282 +int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
14283 + u32 cmd_flags,
14284 + u16 token,
14285 + u16 vlan_id,
14286 + struct dpsw_vlan_attr *attr);
14287 +
14288 +int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
14289 + u32 cmd_flags,
14290 + u16 token,
14291 + u16 vlan_id,
14292 + struct dpsw_vlan_if_cfg *cfg);
14293 +
14294 +int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
14295 + u32 cmd_flags,
14296 + u16 token,
14297 + u16 vlan_id,
14298 + struct dpsw_vlan_if_cfg *cfg);
14299 +
14300 +int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
14301 + u32 cmd_flags,
14302 + u16 token,
14303 + u16 vlan_id,
14304 + struct dpsw_vlan_if_cfg *cfg);
14305 +
14306 +/**
14307 + * struct dpsw_fdb_cfg - FDB Configuration
14308 + * @num_fdb_entries: Number of FDB entries
14309 + * @fdb_aging_time: Aging time in seconds
14310 + */
14311 +struct dpsw_fdb_cfg {
14312 + u16 num_fdb_entries;
14313 + u16 fdb_aging_time;
14314 +};
14315 +
14316 +int dpsw_fdb_add(struct fsl_mc_io *mc_io,
14317 + u32 cmd_flags,
14318 + u16 token,
14319 + u16 *fdb_id,
14320 + const struct dpsw_fdb_cfg *cfg);
14321 +
14322 +int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
14323 + u32 cmd_flags,
14324 + u16 token,
14325 + u16 fdb_id);
14326 +
14327 +/**
14328 + * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
14329 + * @DPSW_FDB_ENTRY_STATIC: Static entry
14330 + * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
14331 + */
14332 +enum dpsw_fdb_entry_type {
14333 + DPSW_FDB_ENTRY_STATIC = 0,
14334 + DPSW_FDB_ENTRY_DINAMIC = 1
14335 +};
14336 +
14337 +/**
14338 + * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
14339 + * @type: Select static or dynamic entry
14340 + * @mac_addr: MAC address
14341 + * @if_egress: Egress interface ID
14342 + */
14343 +struct dpsw_fdb_unicast_cfg {
14344 + enum dpsw_fdb_entry_type type;
14345 + u8 mac_addr[6];
14346 + u16 if_egress;
14347 +};
14348 +
14349 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
14350 + u32 cmd_flags,
14351 + u16 token,
14352 + u16 fdb_id,
14353 + const struct dpsw_fdb_unicast_cfg *cfg);
14354 +
14355 +int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
14356 + u32 cmd_flags,
14357 + u16 token,
14358 + u16 fdb_id,
14359 + struct dpsw_fdb_unicast_cfg *cfg);
14360 +
14361 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
14362 + u32 cmd_flags,
14363 + u16 token,
14364 + u16 fdb_id,
14365 + const struct dpsw_fdb_unicast_cfg *cfg);
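
/*
 * Usage sketch: install a static unicast entry in the default FDB (id 0)
 * that steers one MAC address to egress interface 2. The address and the
 * interface number are arbitrary example values.
 */
static int example_add_static_uc(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpsw_fdb_unicast_cfg cfg = {
		.type = DPSW_FDB_ENTRY_STATIC,
		.mac_addr = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 },
		.if_egress = 2,
	};

	return dpsw_fdb_add_unicast(mc_io, 0, token, 0, &cfg);
}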
14366 +
14367 +/**
14368 + * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
14369 + * @type: Select static or dynamic entry
14370 + * @mac_addr: MAC address
14371 + * @num_ifs: Number of external and internal interfaces
14372 + * @if_id: Egress interface IDs
14373 + */
14374 +struct dpsw_fdb_multicast_cfg {
14375 + enum dpsw_fdb_entry_type type;
14376 + u8 mac_addr[6];
14377 + u16 num_ifs;
14378 + u16 if_id[DPSW_MAX_IF];
14379 +};
14380 +
14381 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
14382 + u32 cmd_flags,
14383 + u16 token,
14384 + u16 fdb_id,
14385 + const struct dpsw_fdb_multicast_cfg *cfg);
14386 +
14387 +int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
14388 + u32 cmd_flags,
14389 + u16 token,
14390 + u16 fdb_id,
14391 + struct dpsw_fdb_multicast_cfg *cfg);
14392 +
14393 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
14394 + u32 cmd_flags,
14395 + u16 token,
14396 + u16 fdb_id,
14397 + const struct dpsw_fdb_multicast_cfg *cfg);
14398 +
14399 +/**
14400 + * enum dpsw_fdb_learning_mode - Auto-learning modes
14401 + * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
14402 + * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
14403 + * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
14404 + * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
14405 + *
14406 + * NON-SECURE LEARNING
14407 + * SMAC found DMAC found CTLU Action
14408 + * v v Forward frame to
14409 + * 1. DMAC destination
14410 + * - v Forward frame to
14411 + * 1. DMAC destination
14412 + * 2. Control interface
14413 + * v - Forward frame to
14414 + * 1. Flooding list of interfaces
14415 + * - - Forward frame to
14416 + * 1. Flooding list of interfaces
14417 + * 2. Control interface
14418 + * SECURE LEARNING
14419 + * SMAC found DMAC found CTLU Action
14420 + * v v Forward frame to
14421 + * 1. DMAC destination
14422 + * - v Forward frame to
14423 + * 1. Control interface
14424 + * v - Forward frame to
14425 + * 1. Flooding list of interfaces
14426 + * - - Forward frame to
14427 + * 1. Control interface
14428 + */
14429 +enum dpsw_fdb_learning_mode {
14430 + DPSW_FDB_LEARNING_MODE_DIS = 0,
14431 + DPSW_FDB_LEARNING_MODE_HW = 1,
14432 + DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
14433 + DPSW_FDB_LEARNING_MODE_SECURE = 3
14434 +};
14435 +
14436 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
14437 + u32 cmd_flags,
14438 + u16 token,
14439 + u16 fdb_id,
14440 + enum dpsw_fdb_learning_mode mode);
14441 +
14442 +/**
14443 + * struct dpsw_fdb_attr - FDB Attributes
14444 + * @max_fdb_entries: Number of FDB entries
14445 + * @fdb_aging_time: Aging time in seconds
14446 + * @learning_mode: Learning mode
14447 + * @num_fdb_mc_groups: Current number of multicast groups
14448 + * @max_fdb_mc_groups: Maximum number of multicast groups
14449 + */
14450 +struct dpsw_fdb_attr {
14451 + u16 max_fdb_entries;
14452 + u16 fdb_aging_time;
14453 + enum dpsw_fdb_learning_mode learning_mode;
14454 + u16 num_fdb_mc_groups;
14455 + u16 max_fdb_mc_groups;
14456 +};
14457 +
14458 +int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
14459 + u32 cmd_flags,
14460 + u16 token,
14461 + u16 fdb_id,
14462 + struct dpsw_fdb_attr *attr);
14463 +
14464 +/**
14465 + * struct dpsw_acl_cfg - ACL Configuration
14466 + * @max_entries: Maximum number of ACL entries
14467 + */
14468 +struct dpsw_acl_cfg {
14469 + u16 max_entries;
14470 +};
14471 +
14472 +/**
14473 + * struct dpsw_acl_fields - ACL fields.
14474 + * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
14475 + * slow protocols, MVRP, STP
14476 + * @l2_source_mac: Source MAC address
14477 + * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
14478 + * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
14479 + * Q-in-Q, IPv4, IPv6, PPPoE
14480 + * @l2_pcp_dei: Priority Code Point (PCP) and Drop Eligible Indicator (DEI)
14481 + * @l2_vlan_id: layer 2 VLAN ID
14482 + * @l2_ether_type: layer 2 Ethernet type
14483 + * @l3_dscp: Layer 3 differentiated services code point
14484 + * @l3_protocol: Layer 3 protocol, identifying which protocol the packet
14485 + *		payload belongs to. The following protocols are
14486 + *		supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
14487 + *		(encapsulation), GRE, PTP
14488 + * @l3_source_ip: Source IPv4 IP
14489 + * @l3_dest_ip: Destination IPv4 IP
14490 + * @l4_source_port: Source TCP/UDP Port
14491 + * @l4_dest_port: Destination TCP/UDP Port
14492 + */
14493 +struct dpsw_acl_fields {
14494 + u8 l2_dest_mac[6];
14495 + u8 l2_source_mac[6];
14496 + u16 l2_tpid;
14497 + u8 l2_pcp_dei;
14498 + u16 l2_vlan_id;
14499 + u16 l2_ether_type;
14500 + u8 l3_dscp;
14501 + u8 l3_protocol;
14502 + u32 l3_source_ip;
14503 + u32 l3_dest_ip;
14504 + u16 l4_source_port;
14505 + u16 l4_dest_port;
14506 +};
14507 +
14508 +/**
14509 + * struct dpsw_acl_key - ACL key
14510 + * @match: Match fields
14511 + * @mask: Mask: b'1 - valid, b'0 don't care
14512 + */
14513 +struct dpsw_acl_key {
14514 + struct dpsw_acl_fields match;
14515 + struct dpsw_acl_fields mask;
14516 +};
14517 +
14518 +/**
14519 + * enum dpsw_acl_action
14520 + * @DPSW_ACL_ACTION_DROP: Drop frame
14521 + * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
14522 + * @DPSW_ACL_ACTION_ACCEPT: Accept frame
14523 + * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
14524 + */
14525 +enum dpsw_acl_action {
14526 + DPSW_ACL_ACTION_DROP,
14527 + DPSW_ACL_ACTION_REDIRECT,
14528 + DPSW_ACL_ACTION_ACCEPT,
14529 + DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
14530 +};
14531 +
14532 +/**
14533 + * struct dpsw_acl_result - ACL action
14534 + * @action: Action to be taken when the ACL entry is hit
14535 + * @if_id: Interface ID to redirect the frame to; valid only if a redirect
14536 + *	action is selected
14537 + */
14538 +struct dpsw_acl_result {
14539 + enum dpsw_acl_action action;
14540 + u16 if_id;
14541 +};
14542 +
14543 +/**
14544 + * struct dpsw_acl_entry_cfg - ACL entry
14545 + * @key_iova: I/O virtual address of DMA-able memory filled with key after call
14546 + * to dpsw_acl_prepare_entry_cfg()
14547 + * @result: Required action when entry hit occurs
14548 + * @precedence: Precedence inside the ACL; 0 is the lowest. This priority cannot
14549 + *		change during the lifetime of a policy; it is the user's
14550 + *		responsibility to space priorities for subsequent rule additions.
14551 + */
14552 +struct dpsw_acl_entry_cfg {
14553 + u64 key_iova;
14554 + struct dpsw_acl_result result;
14555 + int precedence;
14556 +};
14557 +
14558 +int dpsw_acl_add(struct fsl_mc_io *mc_io,
14559 + u32 cmd_flags,
14560 + u16 token,
14561 + u16 *acl_id,
14562 + const struct dpsw_acl_cfg *cfg);
14563 +
14564 +int dpsw_acl_remove(struct fsl_mc_io *mc_io,
14565 + u32 cmd_flags,
14566 + u16 token,
14567 + u16 acl_id);
14568 +
14569 +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
14570 + uint8_t *entry_cfg_buf);
14571 +
14572 +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
14573 + u32 cmd_flags,
14574 + u16 token,
14575 + u16 acl_id,
14576 + const struct dpsw_acl_entry_cfg *cfg);
14577 +
14578 +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
14579 + u32 cmd_flags,
14580 + u16 token,
14581 + u16 acl_id,
14582 + const struct dpsw_acl_entry_cfg *cfg);
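
/*
 * Usage sketch: add an ACL entry that drops frames from one source MAC.
 * 'key_buf' must be DMA-able memory large enough for the hardware key and
 * 'key_iova' its I/O virtual address; allocating and mapping that buffer is
 * assumed to be done by the caller and is not shown here.
 */
static int example_acl_drop_smac(struct fsl_mc_io *mc_io, u16 token,
				 u16 acl_id, u8 *key_buf, u64 key_iova)
{
	struct dpsw_acl_key key = { 0 };
	struct dpsw_acl_entry_cfg entry = {
		.key_iova = key_iova,
		.result.action = DPSW_ACL_ACTION_DROP,
		.precedence = 0,
	};
	u8 smac[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x02 };

	/* match on the source MAC only: set the field and an all-ones mask */
	memcpy(key.match.l2_source_mac, smac, sizeof(smac));
	memset(key.mask.l2_source_mac, 0xff, sizeof(key.mask.l2_source_mac));

	dpsw_acl_prepare_entry_cfg(&key, key_buf);

	return dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry);
}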
14583 +
14584 +/**
14585 + * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
14586 + * @num_ifs: Number of interfaces
14587 + * @if_id: List of interfaces
14588 + */
14589 +struct dpsw_acl_if_cfg {
14590 + u16 num_ifs;
14591 + u16 if_id[DPSW_MAX_IF];
14592 +};
14593 +
14594 +int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
14595 + u32 cmd_flags,
14596 + u16 token,
14597 + u16 acl_id,
14598 + const struct dpsw_acl_if_cfg *cfg);
14599 +
14600 +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
14601 + u32 cmd_flags,
14602 + u16 token,
14603 + u16 acl_id,
14604 + const struct dpsw_acl_if_cfg *cfg);
14605 +
14606 +/**
14607 + * struct dpsw_acl_attr - ACL Attributes
14608 + * @max_entries: Max number of ACL entries
14609 + * @num_entries: Number of used ACL entries
14610 + * @num_ifs: Number of interfaces associated with ACL
14611 + */
14612 +struct dpsw_acl_attr {
14613 + u16 max_entries;
14614 + u16 num_entries;
14615 + u16 num_ifs;
14616 +};
14617 +
14618 +int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
14619 + u32 cmd_flags,
14620 + u16 token,
14621 + u16 acl_id,
14622 + struct dpsw_acl_attr *attr);
14623 +/**
14624 + * struct dpsw_ctrl_if_attr - Control interface attributes
14625 + * @rx_fqid: Receive FQID
14626 + * @rx_err_fqid: Receive error FQID
14627 + * @tx_err_conf_fqid: Transmit error and confirmation FQID
14628 + */
14629 +struct dpsw_ctrl_if_attr {
14630 + u32 rx_fqid;
14631 + u32 rx_err_fqid;
14632 + u32 tx_err_conf_fqid;
14633 +};
14634 +
14635 +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
14636 + u32 cmd_flags,
14637 + u16 token,
14638 + struct dpsw_ctrl_if_attr *attr);
14639 +
14640 +/**
14641 + * Maximum number of DPBP
14642 + */
14643 +#define DPSW_MAX_DPBP 8
14644 +
14645 +/**
14646 + * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
14647 + * @num_dpbp: Number of DPBPs
14648 + * @pools: Array of buffer pools parameters; The number of valid entries
14649 + * must match 'num_dpbp' value
14650 + */
14651 +struct dpsw_ctrl_if_pools_cfg {
14652 + u8 num_dpbp;
14653 + /**
14654 + * struct pools - Buffer pools parameters
14655 + * @dpbp_id: DPBP object ID
14656 + * @buffer_size: Buffer size
14657 + * @backup_pool: Backup pool
14658 + */
14659 + struct {
14660 + int dpbp_id;
14661 + u16 buffer_size;
14662 + int backup_pool;
14663 + } pools[DPSW_MAX_DPBP];
14664 +};
14665 +
14666 +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
14667 + u32 cmd_flags,
14668 + u16 token,
14669 + const struct dpsw_ctrl_if_pools_cfg *cfg);
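
/*
 * Usage sketch: attach a single DPBP buffer pool to the control interface.
 * The DPBP object id comes from the caller; the 2 KiB buffer size is only an
 * example value.
 */
static int example_ctrl_if_pools(struct fsl_mc_io *mc_io, u16 token,
				 int dpbp_id)
{
	struct dpsw_ctrl_if_pools_cfg cfg = { 0 };

	cfg.num_dpbp = 1;
	cfg.pools[0].dpbp_id = dpbp_id;
	cfg.pools[0].buffer_size = 2048;
	cfg.pools[0].backup_pool = 0;

	return dpsw_ctrl_if_set_pools(mc_io, 0, token, &cfg);
}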
14670 +
14671 +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
14672 + u32 cmd_flags,
14673 + u16 token);
14674 +
14675 +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
14676 + u32 cmd_flags,
14677 + u16 token);
14678 +
14679 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
14680 + u32 cmd_flags,
14681 + u16 *major_ver,
14682 + u16 *minor_ver);
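
/*
 * Usage sketch of the version handshake a driver typically performs before
 * using the object: query the MC firmware API version and bail out if it is
 * older than the minimum the driver was written against. The 8.0 minimum used
 * here is only a placeholder.
 */
static int example_check_api_version(struct fsl_mc_io *mc_io)
{
	const u16 min_major = 8, min_minor = 0;
	u16 major, minor;
	int err;

	err = dpsw_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	if (major < min_major ||
	    (major == min_major && minor < min_minor))
		return -EOPNOTSUPP;

	return 0;
}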
14683 +
14684 +#endif /* __FSL_DPSW_H */
14685 diff --git a/drivers/staging/fsl-dpaa2/ethsw/switch.c b/drivers/staging/fsl-dpaa2/ethsw/switch.c
14686 new file mode 100644
14687 index 00000000..3f2c9648
14688 --- /dev/null
14689 +++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
14690 @@ -0,0 +1,1857 @@
14691 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
14692 + *
14693 + * Redistribution and use in source and binary forms, with or without
14694 + * modification, are permitted provided that the following conditions are met:
14695 + * * Redistributions of source code must retain the above copyright
14696 + * notice, this list of conditions and the following disclaimer.
14697 + * * Redistributions in binary form must reproduce the above copyright
14698 + * notice, this list of conditions and the following disclaimer in the
14699 + * documentation and/or other materials provided with the distribution.
14700 + * * Neither the name of Freescale Semiconductor nor the
14701 + * names of its contributors may be used to endorse or promote products
14702 + * derived from this software without specific prior written permission.
14703 + *
14704 + *
14705 + * ALTERNATIVELY, this software may be distributed under the terms of the
14706 + * GNU General Public License ("GPL") as published by the Free Software
14707 + * Foundation, either version 2 of that License or (at your option) any
14708 + * later version.
14709 + *
14710 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
14711 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
14712 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
14713 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
14714 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
14715 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
14716 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
14717 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
14718 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
14719 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14720 + */
14721 +
14722 +#include <linux/module.h>
14723 +#include <linux/msi.h>
14724 +
14725 +#include <linux/netdevice.h>
14726 +#include <linux/etherdevice.h>
14727 +#include <linux/rtnetlink.h>
14728 +#include <linux/if_vlan.h>
14729 +
14730 +#include <uapi/linux/if_bridge.h>
14731 +#include <net/netlink.h>
14732 +
14733 +#include "../../fsl-mc/include/mc.h"
14734 +#include "dpsw.h"
14735 +#include "dpsw-cmd.h"
14736 +
14737 +static const char ethsw_drv_version[] = "0.1";
14738 +
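
/*
 * Usage sketch: configure one interface for 1000 Mbit/s with autonegotiation
 * and pause frames enabled. The rate unit is assumed to be Mbit/s, matching
 * the other DPAA2 link APIs.
 */
static int example_set_link(struct fsl_mc_io *mc_io, u16 token, u16 if_id)
{
	struct dpsw_link_cfg cfg = {
		.rate = 1000,
		.options = DPSW_LINK_OPT_AUTONEG | DPSW_LINK_OPT_PAUSE,
	};

	return dpsw_if_set_link_cfg(mc_io, 0, token, if_id, &cfg);
}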
14739 +/* Minimal supported DPSW version */
14740 +#define DPSW_MIN_VER_MAJOR 8
14741 +#define DPSW_MIN_VER_MINOR 0
14742 +
14743 +/* IRQ index */
14744 +#define DPSW_MAX_IRQ_NUM 2
14745 +
14746 +#define ETHSW_VLAN_MEMBER 1
14747 +#define ETHSW_VLAN_UNTAGGED 2
14748 +#define ETHSW_VLAN_PVID 4
14749 +#define ETHSW_VLAN_GLOBAL 8
14750 +
14751 +/* Maximum Frame Length supported by HW (currently 10k) */
14752 +#define DPAA2_MFL (10 * 1024)
14753 +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
14754 +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
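
/*
 * Worked example of the two macros above for the standard 1500-byte MTU:
 * ETHSW_L2_MAX_FRM(1500) = 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522,
 * and ETHSW_MAX_FRAME_LENGTH = 10240 - 18 - 4 = 10218 bytes.
 */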
14755 +
14756 +struct ethsw_port_priv {
14757 + struct net_device *netdev;
14758 + struct list_head list;
14759 + u16 port_index;
14760 + struct ethsw_dev_priv *ethsw_priv;
14761 + u8 stp_state;
14762 +
14763 + char vlans[VLAN_VID_MASK + 1];
14764 +
14765 +};
14766 +
14767 +struct ethsw_dev_priv {
14768 + struct net_device *netdev;
14769 + struct fsl_mc_io *mc_io;
14770 + u16 dpsw_handle;
14771 + struct dpsw_attr sw_attr;
14772 + int dev_id;
14773 + /*TODO: redundant, we can use the slave dev list */
14774 + struct list_head port_list;
14775 +
14776 + bool flood;
14777 + bool learning;
14778 +
14779 + char vlans[VLAN_VID_MASK + 1];
14780 +};
14781 +
14782 +static int ethsw_port_stop(struct net_device *netdev);
14783 +static int ethsw_port_open(struct net_device *netdev);
14784 +
14785 +static inline void __get_priv(struct net_device *netdev,
14786 + struct ethsw_dev_priv **priv,
14787 + struct ethsw_port_priv **port_priv)
14788 +{
14789 + struct ethsw_dev_priv *_priv = NULL;
14790 + struct ethsw_port_priv *_port_priv = NULL;
14791 +
14792 + if (netdev->flags & IFF_MASTER) {
14793 + _priv = netdev_priv(netdev);
14794 + } else {
14795 + _port_priv = netdev_priv(netdev);
14796 + _priv = _port_priv->ethsw_priv;
14797 + }
14798 +
14799 + if (priv)
14800 + *priv = _priv;
14801 + if (port_priv)
14802 + *port_priv = _port_priv;
14803 +}
14804 +
14805 +/* -------------------------------------------------------------------------- */
14806 +/* ethsw netdevice ops */
14807 +
14808 +static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
14809 +{
14810 + /* we don't support I/O for now, drop the frame */
14811 + dev_kfree_skb_any(skb);
14812 + return NETDEV_TX_OK;
14813 +}
14814 +
14815 +static int ethsw_open(struct net_device *netdev)
14816 +{
14817 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14818 + struct list_head *pos;
14819 + struct ethsw_port_priv *port_priv = NULL;
14820 + int err;
14821 +
14822 + err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
14823 + if (err) {
14824 + netdev_err(netdev, "dpsw_enable err %d\n", err);
14825 + return err;
14826 + }
14827 +
14828 + list_for_each(pos, &priv->port_list) {
14829 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
14830 + err = dev_open(port_priv->netdev);
14831 + if (err)
14832 + netdev_err(port_priv->netdev, "dev_open err %d\n", err);
14833 + }
14834 +
14835 + return 0;
14836 +}
14837 +
14838 +static int ethsw_stop(struct net_device *netdev)
14839 +{
14840 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14841 + struct list_head *pos;
14842 + struct ethsw_port_priv *port_priv = NULL;
14843 + int err;
14844 +
14845 + err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
14846 + if (err) {
14847 + netdev_err(netdev, "dpsw_disable err %d\n", err);
14848 + return err;
14849 + }
14850 +
14851 + list_for_each(pos, &priv->port_list) {
14852 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
14853 + err = dev_close(port_priv->netdev);
14854 + if (err)
14855 + netdev_err(port_priv->netdev,
14856 + "dev_close err %d\n", err);
14857 + }
14858 +
14859 + return 0;
14860 +}
14861 +
14862 +static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
14863 +{
14864 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14865 + int err;
14866 +
14867 + struct dpsw_vlan_cfg vcfg = {
14868 + /* TODO: add support for VLAN private FDBs */
14869 + .fdb_id = 0,
14870 + };
14871 + if (priv->vlans[vid]) {
14872 + netdev_err(netdev, "VLAN already configured\n");
14873 + return -EEXIST;
14874 + }
14875 +
14876 + err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
14877 + if (err) {
14878 + netdev_err(netdev, "dpsw_vlan_add err %d\n", err);
14879 + return err;
14880 + }
14881 + priv->vlans[vid] = ETHSW_VLAN_MEMBER;
14882 +
14883 + return 0;
14884 +}
14885 +
14886 +static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags)
14887 +{
14888 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
14889 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
14890 + int err;
14891 +
14892 + struct dpsw_vlan_if_cfg vcfg = {
14893 + .num_ifs = 1,
14894 + .if_id[0] = port_priv->port_index,
14895 + };
14896 +
14897 + if (port_priv->vlans[vid]) {
14898 + netdev_err(netdev, "VLAN already configured\n");
14899 + return -EEXIST;
14900 + }
14901 +
14902 + if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) {
14903 + netdev_err(netdev, "interface must be down to change PVID!\n");
14904 + return -EBUSY;
14905 + }
14906 +
14907 + err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
14908 + if (err) {
14909 + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
14910 + return err;
14911 + }
14912 + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
14913 +
14914 + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
14915 + err = dpsw_vlan_add_if_untagged(priv->mc_io, 0,
14916 + priv->dpsw_handle, vid, &vcfg);
14917 + if (err) {
14918 + netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n",
14919 + err);
14920 + return err;
14921 + }
14922 + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
14923 + }
14924 +
14925 + if (flags & BRIDGE_VLAN_INFO_PVID) {
14926 + struct dpsw_tci_cfg tci_cfg = {
14927 + /* TODO: at least add better defaults if these cannot
14928 + * be configured
14929 + */
14930 + .pcp = 0,
14931 + .dei = 0,
14932 + .vlan_id = vid,
14933 + };
14934 +
14935 + err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle,
14936 + port_priv->port_index, &tci_cfg);
14937 + if (err) {
14938 + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
14939 + return err;
14940 + }
14941 + port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
14942 + }
14943 +
14944 + return 0;
14945 +}
14946 +
14947 +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
14948 + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
14949 + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
14950 + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
14951 + .len = sizeof(struct bridge_vlan_info), },
14952 +};
14953 +
14954 +static int ethsw_setlink_af_spec(struct net_device *netdev,
14955 + struct nlattr **tb)
14956 +{
14957 + struct bridge_vlan_info *vinfo;
14958 + struct ethsw_dev_priv *priv = NULL;
14959 + struct ethsw_port_priv *port_priv = NULL;
14960 + int err = 0;
14961 +
14962 + if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
14963 + netdev_err(netdev, "no VLAN INFO in nlmsg\n");
14964 + return -EOPNOTSUPP;
14965 + }
14966 +
14967 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
14968 +
14969 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
14970 + return -EINVAL;
14971 +
14972 + __get_priv(netdev, &priv, &port_priv);
14973 +
14974 + if (!port_priv || !priv->vlans[vinfo->vid]) {
14975 + /* command targets switch device or this is a new VLAN */
14976 + err = ethsw_add_vlan(priv->netdev, vinfo->vid);
14977 + if (err)
14978 + return err;
14979 +
14980 + /* command targets switch device; mark it*/
14981 + if (!port_priv)
14982 + priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL;
14983 + }
14984 +
14985 + if (port_priv) {
14986 + /* command targets switch port */
14987 + err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags);
14988 + if (err)
14989 + return err;
14990 + }
14991 +
14992 + return 0;
14993 +}
14994 +
14995 +static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
14996 + [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
14997 + [IFLA_BRPORT_COST] = { .type = NLA_U32 },
14998 + [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
14999 + [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
15000 + [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
15001 + [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
15002 + [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
15003 + [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
15004 +};
15005 +
15006 +static int ethsw_set_learning(struct net_device *netdev, u8 flag)
15007 +{
15008 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
15009 + enum dpsw_fdb_learning_mode learn_mode;
15010 + int err;
15011 +
15012 + if (flag)
15013 + learn_mode = DPSW_FDB_LEARNING_MODE_HW;
15014 + else
15015 + learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
15016 +
15017 + err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
15018 + 0, learn_mode);
15019 + if (err) {
15020 + netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
15021 + return err;
15022 + }
15023 + priv->learning = !!flag;
15024 +
15025 + return 0;
15026 +}
15027 +
15028 +static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
15029 +{
15030 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15031 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15032 + int err;
15033 +
15034 + err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
15035 + port_priv->port_index, (int)flag);
15036 + if (err) {
15037 + netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
15038 + return err;
15039 + }
15040 + priv->flood = !!flag;
15041 +
15042 + return 0;
15043 +}
15044 +
15045 +static int ethsw_port_set_state(struct net_device *netdev, u8 state)
15046 +{
15047 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15048 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15049 + u8 old_state = port_priv->stp_state;
15050 + int err;
15051 +
15052 + struct dpsw_stp_cfg stp_cfg = {
15053 + .vlan_id = 1,
15054 + .state = state,
15055 + };
15056 + /* TODO: check port state, interface may be down */
15057 +
15058 + if (state > BR_STATE_BLOCKING)
15059 + return -EINVAL;
15060 +
15061 + if (state == port_priv->stp_state)
15062 + return 0;
15063 +
15064 + if (state == BR_STATE_DISABLED) {
15065 + port_priv->stp_state = state;
15066 +
15067 + err = ethsw_port_stop(netdev);
15068 + if (err)
15069 + goto error;
15070 + } else {
15071 + err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
15072 + port_priv->port_index, &stp_cfg);
15073 + if (err) {
15074 + netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
15075 + return err;
15076 + }
15077 +
15078 + port_priv->stp_state = state;
15079 +
15080 + if (old_state == BR_STATE_DISABLED) {
15081 + err = ethsw_port_open(netdev);
15082 + if (err)
15083 + goto error;
15084 + }
15085 + }
15086 +
15087 + return 0;
15088 +error:
15089 + port_priv->stp_state = old_state;
15090 + return err;
15091 +}
15092 +
15093 +static int ethsw_setlink_protinfo(struct net_device *netdev,
15094 + struct nlattr **tb)
15095 +{
15096 + struct ethsw_dev_priv *priv;
15097 + struct ethsw_port_priv *port_priv = NULL;
15098 + int err = 0;
15099 +
15100 + __get_priv(netdev, &priv, &port_priv);
15101 +
15102 + if (tb[IFLA_BRPORT_LEARNING]) {
15103 + u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
15104 +
15105 + if (port_priv)
15106 + netdev_warn(netdev,
15107 + "learning set on whole switch dev\n");
15108 +
15109 + err = ethsw_set_learning(priv->netdev, flag);
15110 + if (err)
15111 + return err;
15112 +
15113 + } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
15114 + u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
15115 +
15116 + err = ethsw_port_set_flood(port_priv->netdev, flag);
15117 + if (err)
15118 + return err;
15119 +
15120 + } else if (tb[IFLA_BRPORT_STATE] && port_priv) {
15121 + u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
15122 +
15123 + err = ethsw_port_set_state(port_priv->netdev, state);
15124 + if (err)
15125 + return err;
15126 +
15127 + } else {
15128 + return -EOPNOTSUPP;
15129 + }
15130 +
15131 + return 0;
15132 +}
15133 +
15134 +static int ethsw_setlink(struct net_device *netdev,
15135 + struct nlmsghdr *nlh,
15136 + u16 flags)
15137 +{
15138 + struct nlattr *attr;
15139 + struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
15140 + IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
15141 + int err = 0;
15142 +
15143 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15144 + if (attr) {
15145 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
15146 + ifla_br_policy);
15147 + if (err) {
15148 + netdev_err(netdev,
15149 + "nla_parse_nested for br_policy err %d\n",
15150 + err);
15151 + return err;
15152 + }
15153 +
15154 + err = ethsw_setlink_af_spec(netdev, tb);
15155 + return err;
15156 + }
15157 +
15158 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
15159 + if (attr) {
15160 + err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr,
15161 + ifla_brport_policy);
15162 + if (err) {
15163 + netdev_err(netdev,
15164 + "nla_parse_nested for brport_policy err %d\n",
15165 + err);
15166 + return err;
15167 + }
15168 +
15169 + err = ethsw_setlink_protinfo(netdev, tb);
15170 + return err;
15171 + }
15172 +
15173 + netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n");
15174 + return -EOPNOTSUPP;
15175 +}
15176 +
15177 +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev,
15178 + struct ethsw_dev_priv *priv)
15179 +{
15180 + u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN;
15181 + int iflink;
15182 + int err;
15183 +
15184 + err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
15185 + if (err)
15186 + goto nla_put_err;
15187 + err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex);
15188 + if (err)
15189 + goto nla_put_err;
15190 + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
15191 + if (err)
15192 + goto nla_put_err;
15193 + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
15194 + if (err)
15195 + goto nla_put_err;
15196 + if (netdev->addr_len) {
15197 + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
15198 + netdev->dev_addr);
15199 + if (err)
15200 + goto nla_put_err;
15201 + }
15202 +
15203 + iflink = dev_get_iflink(netdev);
15204 + if (netdev->ifindex != iflink) {
15205 + err = nla_put_u32(skb, IFLA_LINK, iflink);
15206 + if (err)
15207 + goto nla_put_err;
15208 + }
15209 +
15210 + return 0;
15211 +
15212 +nla_put_err:
15213 + netdev_err(netdev, "nla_put_ err %d\n", err);
15214 + return err;
15215 +}
15216 +
15217 +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev,
15218 + struct ethsw_port_priv *port_priv)
15219 +{
15220 + struct nlattr *nest;
15221 + int err;
15222 +
15223 + u8 stp_state = port_priv->stp_state;
15224 +
15225 + if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING)
15226 + stp_state = BR_STATE_BLOCKING;
15227 +
15228 + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
15229 + if (!nest) {
15230 + netdev_err(netdev, "nla_nest_start failed\n");
15231 + return -ENOMEM;
15232 + }
15233 +
15234 + err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state);
15235 + if (err)
15236 + goto nla_put_err;
15237 + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
15238 + if (err)
15239 + goto nla_put_err;
15240 + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
15241 + if (err)
15242 + goto nla_put_err;
15243 + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
15244 + if (err)
15245 + goto nla_put_err;
15246 + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
15247 + if (err)
15248 + goto nla_put_err;
15249 + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
15250 + if (err)
15251 + goto nla_put_err;
15252 + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
15253 + if (err)
15254 + goto nla_put_err;
15255 + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING,
15256 + port_priv->ethsw_priv->learning);
15257 + if (err)
15258 + goto nla_put_err;
15259 + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
15260 + port_priv->ethsw_priv->flood);
15261 + if (err)
15262 + goto nla_put_err;
15263 + nla_nest_end(skb, nest);
15264 +
15265 + return 0;
15266 +
15267 +nla_put_err:
15268 + netdev_err(netdev, "nla_put_ err %d\n", err);
15269 + nla_nest_cancel(skb, nest);
15270 + return err;
15271 +}
15272 +
15273 +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev,
15274 + struct ethsw_dev_priv *priv,
15275 + struct ethsw_port_priv *port_priv)
15276 +{
15277 + struct nlattr *nest;
15278 + struct bridge_vlan_info vinfo;
15279 + const char *vlans;
15280 + u16 i;
15281 + int err;
15282 +
15283 + nest = nla_nest_start(skb, IFLA_AF_SPEC);
15284 + if (!nest) {
15285 + netdev_err(netdev, "nla_nest_start failed");
15286 + return -ENOMEM;
15287 + }
15288 +
15289 + if (port_priv)
15290 + vlans = port_priv->vlans;
15291 + else
15292 + vlans = priv->vlans;
15293 +
15294 + for (i = 0; i < VLAN_VID_MASK + 1; i++) {
15295 + vinfo.flags = 0;
15296 + vinfo.vid = i;
15297 +
15298 + if (vlans[i] & ETHSW_VLAN_UNTAGGED)
15299 + vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
15300 +
15301 + if (vlans[i] & ETHSW_VLAN_PVID)
15302 + vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
15303 +
15304 + if (vlans[i] & ETHSW_VLAN_MEMBER) {
15305 + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
15306 + sizeof(vinfo), &vinfo);
15307 + if (err)
15308 + goto nla_put_err;
15309 + }
15310 + }
15311 +
15312 + nla_nest_end(skb, nest);
15313 +
15314 + return 0;
15315 +nla_put_err:
15316 + netdev_err(netdev, "nla_put_ err %d\n", err);
15317 + nla_nest_cancel(skb, nest);
15318 + return err;
15319 +}
15320 +
15321 +static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15322 + struct net_device *netdev, u32 filter_mask,
15323 + int nlflags)
15324 +{
15325 + struct ethsw_dev_priv *priv;
15326 + struct ethsw_port_priv *port_priv = NULL;
15327 + struct ifinfomsg *hdr;
15328 + struct nlmsghdr *nlh;
15329 + int err;
15330 +
15331 + __get_priv(netdev, &priv, &port_priv);
15332 +
15333 + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
15334 + if (!nlh)
15335 + return -EMSGSIZE;
15336 +
15337 + hdr = nlmsg_data(nlh);
15338 + memset(hdr, 0, sizeof(*hdr));
15339 + hdr->ifi_family = AF_BRIDGE;
15340 + hdr->ifi_type = netdev->type;
15341 + hdr->ifi_index = netdev->ifindex;
15342 + hdr->ifi_flags = dev_get_flags(netdev);
15343 +
15344 + err = __nla_put_netdev(skb, netdev, priv);
15345 + if (err)
15346 + goto nla_put_err;
15347 +
15348 + if (port_priv) {
15349 + err = __nla_put_port(skb, netdev, port_priv);
15350 + if (err)
15351 + goto nla_put_err;
15352 + }
15353 +
15354 + /* Check if the VID information is requested */
15355 + if (filter_mask & RTEXT_FILTER_BRVLAN) {
15356 + err = __nla_put_vlan(skb, netdev, priv, port_priv);
15357 + if (err)
15358 + goto nla_put_err;
15359 + }
15360 +
15361 + nlmsg_end(skb, nlh);
15362 + return skb->len;
15363 +
15364 +nla_put_err:
15365 + nlmsg_cancel(skb, nlh);
15366 + return -EMSGSIZE;
15367 +}
15368 +
15369 +static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid)
15370 +{
15371 + struct list_head *pos;
15372 + struct ethsw_port_priv *ppriv_local = NULL;
15373 + int err = 0;
15374 +
15375 + if (!priv->vlans[vid])
15376 + return -ENOENT;
15377 +
15378 + err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid);
15379 + if (err) {
15380 + netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err);
15381 + return err;
15382 + }
15383 + priv->vlans[vid] = 0;
15384 +
15385 + list_for_each(pos, &priv->port_list) {
15386 + ppriv_local = list_entry(pos, struct ethsw_port_priv,
15387 + list);
15388 + ppriv_local->vlans[vid] = 0;
15389 + }
15390 +
15391 + return 0;
15392 +}
15393 +
15394 +static int ethsw_dellink_port(struct ethsw_dev_priv *priv,
15395 + struct ethsw_port_priv *port_priv,
15396 + u16 vid)
15397 +{
15398 + struct list_head *pos;
15399 + struct ethsw_port_priv *ppriv_local = NULL;
15400 + struct dpsw_vlan_if_cfg vcfg = {
15401 + .num_ifs = 1,
15402 + .if_id[0] = port_priv->port_index,
15403 + };
15404 + unsigned int count = 0;
15405 + int err = 0;
15406 +
15407 + if (!port_priv->vlans[vid])
15408 + return -ENOENT;
15409 +
15410 + /* The VLAN is removed from the switch if the global flag is not
15411 + * set and the VLAN is configured on only one port
15412 + */
15413 + if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) {
15414 + list_for_each(pos, &priv->port_list) {
15415 + ppriv_local = list_entry(pos, struct ethsw_port_priv,
15416 + list);
15417 + if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER)
15418 + count++;
15419 + }
15420 +
15421 + if (count == 1)
15422 + return ethsw_dellink_switch(priv, vid);
15423 + }
15424 +
15425 + err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle,
15426 + vid, &vcfg);
15427 + if (err) {
15428 + netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err);
15429 + return err;
15430 + }
15431 + port_priv->vlans[vid] = 0;
15432 + return 0;
15433 +}
15434 +
15435 +static int ethsw_dellink(struct net_device *netdev,
15436 + struct nlmsghdr *nlh,
15437 + u16 flags)
15438 +{
15439 + struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
15440 + struct nlattr *spec;
15441 + struct bridge_vlan_info *vinfo;
15442 + struct ethsw_dev_priv *priv;
15443 + struct ethsw_port_priv *port_priv = NULL;
15444 + int err = 0;
15445 +
15446 + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15447 + if (!spec)
15448 + return 0;
15449 +
15450 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
15451 + if (err)
15452 + return err;
15453 +
15454 + if (!tb[IFLA_BRIDGE_VLAN_INFO])
15455 + return -EOPNOTSUPP;
15456 +
15457 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
15458 +
15459 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
15460 + return -EINVAL;
15461 +
15462 + __get_priv(netdev, &priv, &port_priv);
15463 +
15464 + /* decide if command targets switch device or port */
15465 + if (!port_priv)
15466 + err = ethsw_dellink_switch(priv, vinfo->vid);
15467 + else
15468 + err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
15469 +
15470 + return err;
15471 +}
15472 +
15473 +static const struct net_device_ops ethsw_ops = {
15474 + .ndo_open = &ethsw_open,
15475 + .ndo_stop = &ethsw_stop,
15476 +
15477 + .ndo_bridge_setlink = &ethsw_setlink,
15478 + .ndo_bridge_getlink = &ethsw_getlink,
15479 + .ndo_bridge_dellink = &ethsw_dellink,
15480 +
15481 + .ndo_start_xmit = &ethsw_dropframe,
15482 +};
15483 +
15484 +/*--------------------------------------------------------------------------- */
15485 +/* switch port netdevice ops */
15486 +
15487 +static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
15488 +{
15489 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15490 + struct dpsw_link_state state;
15491 + int err;
15492 +
15493 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
15494 + port_priv->ethsw_priv->dpsw_handle,
15495 + port_priv->port_index, &state);
15496 + if (unlikely(err)) {
15497 + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
15498 + return err;
15499 + }
15500 +
15501 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
15502 +
15503 + if (state.up)
15504 + netif_carrier_on(port_priv->netdev);
15505 + else
15506 + netif_carrier_off(port_priv->netdev);
15507 +
15508 + return 0;
15509 +}
15510 +
15511 +static int ethsw_port_open(struct net_device *netdev)
15512 +{
15513 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15514 + int err;
15515 +
15516 + err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
15517 + port_priv->ethsw_priv->dpsw_handle,
15518 + port_priv->port_index);
15519 + if (err) {
15520 + netdev_err(netdev, "dpsw_if_enable err %d\n", err);
15521 + return err;
15522 + }
15523 +
15524 + /* sync carrier state */
15525 + err = _ethsw_port_carrier_state_sync(netdev);
15526 + if (err) {
15527 + netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n",
15528 + err);
15529 + goto err_carrier_sync;
15530 + }
15531 +
15532 + return 0;
15533 +
15534 +err_carrier_sync:
15535 + dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
15536 + port_priv->ethsw_priv->dpsw_handle,
15537 + port_priv->port_index);
15538 + return err;
15539 +}
15540 +
15541 +static int ethsw_port_stop(struct net_device *netdev)
15542 +{
15543 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15544 + int err;
15545 +
15546 + err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
15547 + port_priv->ethsw_priv->dpsw_handle,
15548 + port_priv->port_index);
15549 + if (err) {
15550 + netdev_err(netdev, "dpsw_if_disable err %d\n", err);
15551 + return err;
15552 + }
15553 +
15554 + return 0;
15555 +}
15556 +
15557 +static int ethsw_port_fdb_add_uc(struct net_device *netdev,
15558 + const unsigned char *addr)
15559 +{
15560 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15561 + struct dpsw_fdb_unicast_cfg entry = {0};
15562 + int err;
15563 +
15564 + entry.if_egress = port_priv->port_index;
15565 + entry.type = DPSW_FDB_ENTRY_STATIC;
15566 + ether_addr_copy(entry.mac_addr, addr);
15567 +
15568 + err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
15569 + port_priv->ethsw_priv->dpsw_handle,
15570 + 0, &entry);
15571 + if (err)
15572 + netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
15573 + return err;
15574 +}
15575 +
15576 +static int ethsw_port_fdb_del_uc(struct net_device *netdev,
15577 + const unsigned char *addr)
15578 +{
15579 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15580 + struct dpsw_fdb_unicast_cfg entry = {0};
15581 + int err;
15582 +
15583 + entry.if_egress = port_priv->port_index;
15584 + entry.type = DPSW_FDB_ENTRY_STATIC;
15585 + ether_addr_copy(entry.mac_addr, addr);
15586 +
15587 + err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0,
15588 + port_priv->ethsw_priv->dpsw_handle,
15589 + 0, &entry);
15590 + if (err)
15591 + netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err);
15592 + return err;
15593 +}
15594 +
15595 +static int ethsw_port_fdb_add_mc(struct net_device *netdev,
15596 + const unsigned char *addr)
15597 +{
15598 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15599 + struct dpsw_fdb_multicast_cfg entry = {0};
15600 + int err;
15601 +
15602 + ether_addr_copy(entry.mac_addr, addr);
15603 + entry.type = DPSW_FDB_ENTRY_STATIC;
15604 + entry.num_ifs = 1;
15605 + entry.if_id[0] = port_priv->port_index;
15606 +
15607 + err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0,
15608 + port_priv->ethsw_priv->dpsw_handle,
15609 + 0, &entry);
15610 + if (err)
15611 + netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err);
15612 + return err;
15613 +}
15614 +
15615 +static int ethsw_port_fdb_del_mc(struct net_device *netdev,
15616 + const unsigned char *addr)
15617 +{
15618 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15619 + struct dpsw_fdb_multicast_cfg entry = {0};
15620 + int err;
15621 +
15622 + ether_addr_copy(entry.mac_addr, addr);
15623 + entry.type = DPSW_FDB_ENTRY_STATIC;
15624 + entry.num_ifs = 1;
15625 + entry.if_id[0] = port_priv->port_index;
15626 +
15627 + err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0,
15628 + port_priv->ethsw_priv->dpsw_handle,
15629 + 0, &entry);
15630 + if (err)
15631 + netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err);
15632 + return err;
15633 +}
15634 +
15635 +static int _lookup_address(struct net_device *netdev, int is_uc,
15636 + const unsigned char *addr)
15637 +{
15638 + struct netdev_hw_addr *ha;
15639 + struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
15640 +
15641 + netif_addr_lock_bh(netdev);
15642 + list_for_each_entry(ha, &list->list, list) {
15643 + if (ether_addr_equal(ha->addr, addr)) {
15644 + netif_addr_unlock_bh(netdev);
15645 + return 1;
15646 + }
15647 + }
15648 + netif_addr_unlock_bh(netdev);
15649 + return 0;
15650 +}
15651 +
15652 +static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
15653 + struct net_device *netdev,
15654 + const unsigned char *addr, u16 vid,
15655 + u16 flags)
15656 +{
15657 + struct list_head *pos;
15658 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15659 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15660 + int err;
15661 +
15662 + /* TODO: add replace support when added to iproute bridge */
15663 + if (!(flags & NLM_F_REQUEST)) {
15664 + netdev_err(netdev,
15665 + "ethsw_port_fdb_add unexpected flags value %08x\n",
15666 + flags);
15667 + return -EINVAL;
15668 + }
15669 +
15670 + if (is_unicast_ether_addr(addr)) {
15671 + /* if entry cannot be replaced, return error if exists */
15672 + if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) {
15673 + list_for_each(pos, &priv->port_list) {
15674 + port_priv = list_entry(pos,
15675 + struct ethsw_port_priv,
15676 + list);
15677 + if (_lookup_address(port_priv->netdev,
15678 + 1, addr))
15679 + return -EEXIST;
15680 + }
15681 + }
15682 +
15683 + err = ethsw_port_fdb_add_uc(netdev, addr);
15684 + if (err) {
15685 + netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n",
15686 + err);
15687 + return err;
15688 + }
15689 +
15690 + /* we might have replaced an existing entry for a different
15691 + * switch port, make sure the address doesn't linger in any
15692 + * port address list
15693 + */
15694 + list_for_each(pos, &priv->port_list) {
15695 + port_priv = list_entry(pos, struct ethsw_port_priv,
15696 + list);
15697 + dev_uc_del(port_priv->netdev, addr);
15698 + }
15699 +
15700 + err = dev_uc_add(netdev, addr);
15701 + if (err) {
15702 + netdev_err(netdev, "dev_uc_add err %d\n", err);
15703 + return err;
15704 + }
15705 + } else {
15706 + struct dpsw_fdb_multicast_cfg entry = {
15707 + .type = DPSW_FDB_ENTRY_STATIC,
15708 + .num_ifs = 0,
15709 + };
15710 +
15711 + /* check if address is already set on this port */
15712 + if (_lookup_address(netdev, 0, addr))
15713 + return -EEXIST;
15714 +
15715 + /* check if the address exists on another port */
15716 + ether_addr_copy(entry.mac_addr, addr);
15717 + err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
15718 + 0, &entry);
15719 + if (!err) {
15720 + /* entry exists, can we replace it? */
15721 + if (flags & NLM_F_EXCL)
15722 + return -EEXIST;
15723 + } else if (err != -ENAVAIL) {
15724 + netdev_err(netdev, "dpsw_fdb_get_unicast err %d\n",
15725 + err);
15726 + return err;
15727 + }
15728 +
15729 + err = ethsw_port_fdb_add_mc(netdev, addr);
15730 + if (err) {
15731 + netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
15732 + err);
15733 + return err;
15734 + }
15735 +
15736 + err = dev_mc_add(netdev, addr);
15737 + if (err) {
15738 + netdev_err(netdev, "dev_mc_add err %d\n", err);
15739 + return err;
15740 + }
15741 + }
15742 +
15743 + return 0;
15744 +}
15745 +
15746 +static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
15747 + struct net_device *netdev,
15748 + const unsigned char *addr, u16 vid)
15749 +{
15750 + int err;
15751 +
15752 + if (is_unicast_ether_addr(addr)) {
15753 + err = ethsw_port_fdb_del_uc(netdev, addr);
15754 + if (err) {
15755 + netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
15756 + err);
15757 + return err;
15758 + }
15759 +
15760 + /* also delete if configured on port */
15761 + err = dev_uc_del(netdev, addr);
15762 + if (err && err != -ENOENT) {
15763 + netdev_err(netdev, "dev_uc_del err %d\n", err);
15764 + return err;
15765 + }
15766 + } else {
15767 + if (!_lookup_address(netdev, 0, addr))
15768 + return -ENOENT;
15769 +
15770 + err = dev_mc_del(netdev, addr);
15771 + if (err) {
15772 + netdev_err(netdev, "dev_mc_del err %d\n", err);
15773 + return err;
15774 + }
15775 +
15776 + err = ethsw_port_fdb_del_mc(netdev, addr);
15777 + if (err) {
15778 + netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
15779 + err);
15780 + return err;
15781 + }
15782 + }
15783 +
15784 + return 0;
15785 +}
15786 +
15787 +void ethsw_port_get_stats(struct net_device *netdev,
15788 + struct rtnl_link_stats64 *storage)
15789 +{
15790 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15791 + u64 tmp;
15792 + int err;
15793 +
15794 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15795 + port_priv->ethsw_priv->dpsw_handle,
15796 + port_priv->port_index,
15797 + DPSW_CNT_ING_FRAME, &storage->rx_packets);
15798 + if (err)
15799 + goto error;
15800 +
15801 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15802 + port_priv->ethsw_priv->dpsw_handle,
15803 + port_priv->port_index,
15804 + DPSW_CNT_EGR_FRAME, &storage->tx_packets);
15805 + if (err)
15806 + goto error;
15807 +
15808 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15809 + port_priv->ethsw_priv->dpsw_handle,
15810 + port_priv->port_index,
15811 + DPSW_CNT_ING_BYTE, &storage->rx_bytes);
15812 + if (err)
15813 + goto error;
15814 +
15815 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15816 + port_priv->ethsw_priv->dpsw_handle,
15817 + port_priv->port_index,
15818 + DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
15819 + if (err)
15820 + goto error;
15821 +
15822 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15823 + port_priv->ethsw_priv->dpsw_handle,
15824 + port_priv->port_index,
15825 + DPSW_CNT_ING_FRAME_DISCARD,
15826 + &storage->rx_dropped);
15827 + if (err)
15828 + goto error;
15829 +
15830 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15831 + port_priv->ethsw_priv->dpsw_handle,
15832 + port_priv->port_index,
15833 + DPSW_CNT_ING_FLTR_FRAME,
15834 + &tmp);
15835 + if (err)
15836 + goto error;
15837 + storage->rx_dropped += tmp;
15838 +
15839 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15840 + port_priv->ethsw_priv->dpsw_handle,
15841 + port_priv->port_index,
15842 + DPSW_CNT_EGR_FRAME_DISCARD,
15843 + &storage->tx_dropped);
15844 + if (err)
15845 + goto error;
15846 +
15847 + return;
15848 +
15849 +error:
15850 + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
15851 +}
15852 +
15853 +static int ethsw_port_change_mtu(struct net_device *netdev, int mtu)
15854 +{
15855 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15856 + int err;
15857 +
15858 + if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) {
15859 + netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
15860 + mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH);
15861 + return -EINVAL;
15862 + }
15863 +
15864 + err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io,
15865 + 0,
15866 + port_priv->ethsw_priv->dpsw_handle,
15867 + port_priv->port_index,
15868 + (u16)ETHSW_L2_MAX_FRM(mtu));
15869 + if (err) {
15870 + netdev_err(netdev,
15871 + "dpsw_if_set_max_frame_length() err %d\n", err);
15872 + return err;
15873 + }
15874 +
15875 + netdev->mtu = mtu;
15876 + return 0;
15877 +}
15878 +
15879 +static const struct net_device_ops ethsw_port_ops = {
15880 + .ndo_open = &ethsw_port_open,
15881 + .ndo_stop = &ethsw_port_stop,
15882 +
15883 + .ndo_fdb_add = &ethsw_port_fdb_add,
15884 + .ndo_fdb_del = &ethsw_port_fdb_del,
15885 + .ndo_fdb_dump = &ndo_dflt_fdb_dump,
15886 +
15887 + .ndo_get_stats64 = &ethsw_port_get_stats,
15888 + .ndo_change_mtu = &ethsw_port_change_mtu,
15889 +
15890 + .ndo_start_xmit = &ethsw_dropframe,
15891 +};
15892 +
15893 +static void ethsw_get_drvinfo(struct net_device *netdev,
15894 + struct ethtool_drvinfo *drvinfo)
15895 +{
15896 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15897 + u16 version_major, version_minor;
15898 + int err;
15899 +
15900 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
15901 + strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version));
15902 +
15903 + err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0,
15904 + &version_major,
15905 + &version_minor);
15906 + if (err)
15907 + strlcpy(drvinfo->fw_version, "N/A",
15908 + sizeof(drvinfo->fw_version));
15909 + else
15910 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
15911 + "%u.%u", version_major, version_minor);
15912 +
15913 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
15914 + sizeof(drvinfo->bus_info));
15915 +}
15916 +
15917 +static int ethsw_get_settings(struct net_device *netdev,
15918 + struct ethtool_cmd *cmd)
15919 +{
15920 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15921 + struct dpsw_link_state state = {0};
15922 + int err = 0;
15923 +
15924 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
15925 + port_priv->ethsw_priv->dpsw_handle,
15926 + port_priv->port_index,
15927 + &state);
15928 + if (err) {
15929 + netdev_err(netdev, "ERROR %d getting link state", err);
15930 + goto out;
15931 + }
15932 +
15933 + /* At the moment, we have no way of interrogating the DPMAC
15934 + * from the DPSW side, and a DPMAC may not exist at all.
15935 + * Report only the autoneg state, duplex and speed.
15936 + */
15937 + if (state.options & DPSW_LINK_OPT_AUTONEG)
15938 + cmd->autoneg = AUTONEG_ENABLE;
15939 + if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
15940 + cmd->duplex = DUPLEX_FULL;
15941 + ethtool_cmd_speed_set(cmd, state.rate);
15942 +
15943 +out:
15944 + return err;
15945 +}
15946 +
15947 +static int ethsw_set_settings(struct net_device *netdev,
15948 + struct ethtool_cmd *cmd)
15949 +{
15950 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15951 + struct dpsw_link_state state = {0};
15952 + struct dpsw_link_cfg cfg = {0};
15953 + int err = 0;
15954 +
15955 + netdev_dbg(netdev, "Setting link parameters...");
15956 +
15957 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
15958 + port_priv->ethsw_priv->dpsw_handle,
15959 + port_priv->port_index,
15960 + &state);
15961 + if (err) {
15962 + netdev_err(netdev, "ERROR %d getting link state", err);
15963 + goto out;
15964 + }
15965 +
15966 + /* Due to a temporary MC limitation, the DPSW port must be down
15967 + * in order to change link settings; let the user know about
15968 + * this limitation.
15969 + */
15970 + if (netif_running(netdev)) {
15971 + netdev_info(netdev,
15972 + "Sorry, interface must be brought down first.\n");
15973 + return -EACCES;
15974 + }
15975 +
15976 + cfg.options = state.options;
15977 + cfg.rate = ethtool_cmd_speed(cmd);
15978 + if (cmd->autoneg == AUTONEG_ENABLE)
15979 + cfg.options |= DPSW_LINK_OPT_AUTONEG;
15980 + else
15981 + cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
15982 + if (cmd->duplex == DUPLEX_HALF)
15983 + cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
15984 + else
15985 + cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
15986 +
15987 + err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0,
15988 + port_priv->ethsw_priv->dpsw_handle,
15989 + port_priv->port_index,
15990 + &cfg);
15991 + if (err)
15992 + /* ethtool will be loud enough if we return an error; no point
15993 + * in putting our own error message on the console by default
15994 + */
15995 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
15996 +
15997 +out:
15998 + return err;
15999 +}
16000 +
16001 +static struct {
16002 + enum dpsw_counter id;
16003 + char name[ETH_GSTRING_LEN];
16004 +} ethsw_ethtool_counters[] = {
16005 + {DPSW_CNT_ING_FRAME, "rx frames"},
16006 + {DPSW_CNT_ING_BYTE, "rx bytes"},
16007 + {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
16008 + {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
16009 + {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
16010 + {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
16011 + {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
16012 + {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
16013 + {DPSW_CNT_EGR_FRAME, "tx frames"},
16014 + {DPSW_CNT_EGR_BYTE, "tx bytes"},
16015 + {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
16016 +
16017 +};
16018 +
16019 +static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
16020 +{
16021 + switch (sset) {
16022 + case ETH_SS_STATS:
16023 + return ARRAY_SIZE(ethsw_ethtool_counters);
16024 + default:
16025 + return -EOPNOTSUPP;
16026 + }
16027 +}
16028 +
16029 +static void ethsw_ethtool_get_strings(struct net_device *netdev,
16030 + u32 stringset, u8 *data)
16031 +{
16032 + u32 i;
16033 +
16034 + switch (stringset) {
16035 + case ETH_SS_STATS:
16036 + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
16037 + memcpy(data + i * ETH_GSTRING_LEN,
16038 + ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
16039 + break;
16040 + }
16041 +}
16042 +
16043 +static void ethsw_ethtool_get_stats(struct net_device *netdev,
16044 + struct ethtool_stats *stats,
16045 + u64 *data)
16046 +{
16047 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16048 + u32 i;
16049 + int err;
16050 +
16051 + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
16052 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16053 + port_priv->ethsw_priv->dpsw_handle,
16054 + port_priv->port_index,
16055 + ethsw_ethtool_counters[i].id,
16056 + &data[i]);
16057 + if (err)
16058 + netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
16059 + ethsw_ethtool_counters[i].name, err);
16060 + }
16061 +}
16062 +
16063 +static const struct ethtool_ops ethsw_port_ethtool_ops = {
16064 + .get_drvinfo = &ethsw_get_drvinfo,
16065 + .get_link = &ethtool_op_get_link,
16066 + .get_settings = &ethsw_get_settings,
16067 + .set_settings = &ethsw_set_settings,
16068 + .get_strings = &ethsw_ethtool_get_strings,
16069 + .get_ethtool_stats = &ethsw_ethtool_get_stats,
16070 + .get_sset_count = &ethsw_ethtool_get_sset_count,
16071 +};
16072 +
16073 +/* -------------------------------------------------------------------------- */
16074 +/* ethsw driver functions */
16075 +
16076 +static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
16077 +{
16078 + struct list_head *pos;
16079 + struct ethsw_port_priv *port_priv;
16080 + int err;
16081 +
16082 + list_for_each(pos, &priv->port_list) {
16083 + port_priv = list_entry(pos, struct ethsw_port_priv,
16084 + list);
16085 +
16086 + err = _ethsw_port_carrier_state_sync(port_priv->netdev);
16087 + if (err)
16088 + netdev_err(port_priv->netdev,
16089 + "_ethsw_port_carrier_state_sync err %d\n",
16090 + err);
16091 + }
16092 +
16093 + return 0;
16094 +}
16095 +
16096 +static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
16097 +{
16098 + return IRQ_WAKE_THREAD;
16099 +}
16100 +
16101 +static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
16102 +{
16103 + struct device *dev = (struct device *)arg;
16104 + struct net_device *netdev = dev_get_drvdata(dev);
16105 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16106 +
16107 + struct fsl_mc_io *io = priv->mc_io;
16108 + u16 token = priv->dpsw_handle;
16109 + int irq_index = DPSW_IRQ_INDEX_IF;
16110 +
16111 + /* Mask the events and the if_id reserved bits to be cleared on read */
16112 + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
16113 + int err;
16114 +
16115 + err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
16116 + if (unlikely(err)) {
16117 + netdev_err(netdev, "Can't get irq status (err %d)", err);
16118 +
16119 + err = dpsw_clear_irq_status(io, 0, token, irq_index,
16120 + 0xFFFFFFFF);
16121 + if (unlikely(err))
16122 + netdev_err(netdev, "Can't clear irq status (err %d)",
16123 + err);
16124 + goto out;
16125 + }
16126 +
16127 + if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
16128 + err = ethsw_links_state_update(priv);
16129 + if (unlikely(err))
16130 + goto out;
16131 + }
16132 +
16133 +out:
16134 + return IRQ_HANDLED;
16135 +}
16136 +
16137 +static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
16138 +{
16139 + struct device *dev = &sw_dev->dev;
16140 + struct net_device *netdev = dev_get_drvdata(dev);
16141 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16142 + int err = 0;
16143 + struct fsl_mc_device_irq *irq;
16144 + const int irq_index = DPSW_IRQ_INDEX_IF;
16145 + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
16146 +
16147 + err = fsl_mc_allocate_irqs(sw_dev);
16148 + if (unlikely(err)) {
16149 + dev_err(dev, "MC irqs allocation failed\n");
16150 + return err;
16151 + }
16152 +
16153 + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) {
16154 + err = -EINVAL;
16155 + goto free_irq;
16156 + }
16157 +
16158 + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16159 + irq_index, 0);
16160 + if (unlikely(err)) {
16161 + dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
16162 + goto free_irq;
16163 + }
16164 +
16165 + irq = sw_dev->irqs[irq_index];
16166 +
16167 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
16168 + ethsw_irq0_handler,
16169 + _ethsw_irq0_handler_thread,
16170 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
16171 + dev_name(dev), dev);
16172 + if (unlikely(err)) {
16173 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
16174 + goto free_irq;
16175 + }
16176 +
16177 + err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle,
16178 + irq_index, mask);
16179 + if (unlikely(err)) {
16180 + dev_err(dev, "dpsw_set_irq_mask(): %d", err);
16181 + goto free_devm_irq;
16182 + }
16183 +
16184 + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16185 + irq_index, 1);
16186 + if (unlikely(err)) {
16187 + dev_err(dev, "dpsw_set_irq_enable(): %d", err);
16188 + goto free_devm_irq;
16189 + }
16190 +
16191 + return 0;
16192 +
16193 +free_devm_irq:
16194 + devm_free_irq(dev, irq->msi_desc->irq, dev);
16195 +free_irq:
16196 + fsl_mc_free_irqs(sw_dev);
16197 + return err;
16198 +}
16199 +
16200 +static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
16201 +{
16202 + struct device *dev = &sw_dev->dev;
16203 + struct net_device *netdev = dev_get_drvdata(dev);
16204 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16205 +
16206 + dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16207 + DPSW_IRQ_INDEX_IF, 0);
16208 + devm_free_irq(dev,
16209 + sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq,
16210 + dev);
16211 + fsl_mc_free_irqs(sw_dev);
16212 +}
16213 +
16214 +static int __cold
16215 +ethsw_init(struct fsl_mc_device *sw_dev)
16216 +{
16217 + struct device *dev = &sw_dev->dev;
16218 + struct ethsw_dev_priv *priv;
16219 + struct net_device *netdev;
16220 + int err = 0;
16221 + u16 i;
16222 + u16 version_major, version_minor;
16223 + const struct dpsw_stp_cfg stp_cfg = {
16224 + .vlan_id = 1,
16225 + .state = DPSW_STP_STATE_FORWARDING,
16226 + };
16227 +
16228 + netdev = dev_get_drvdata(dev);
16229 + priv = netdev_priv(netdev);
16230 +
16231 + priv->dev_id = sw_dev->obj_desc.id;
16232 +
16233 + err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle);
16234 + if (err) {
16235 + dev_err(dev, "dpsw_open err %d\n", err);
16236 + goto err_exit;
16237 + }
16238 + if (!priv->dpsw_handle) {
16239 + dev_err(dev, "dpsw_open returned null handle but no error\n");
16240 + err = -EFAULT;
16241 + goto err_exit;
16242 + }
16243 +
16244 + err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
16245 + &priv->sw_attr);
16246 + if (err) {
16247 + dev_err(dev, "dpsw_get_attributes err %d\n", err);
16248 + goto err_close;
16249 + }
16250 +
16251 + err = dpsw_get_api_version(priv->mc_io, 0,
16252 + &version_major,
16253 + &version_minor);
16254 + if (err) {
16255 + dev_err(dev, "dpsw_get_api_version err %d\n", err);
16256 + goto err_close;
16257 + }
16258 +
16259 + /* Minimum supported DPSW version check */
16260 + if (version_major < DPSW_MIN_VER_MAJOR ||
16261 + (version_major == DPSW_MIN_VER_MAJOR &&
16262 + version_minor < DPSW_MIN_VER_MINOR)) {
16263 + dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
16264 + version_major,
16265 + version_minor,
16266 + DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
16267 + err = -ENOTSUPP;
16268 + goto err_close;
16269 + }
16270 +
16271 + err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
16272 + if (err) {
16273 + dev_err(dev, "dpsw_reset err %d\n", err);
16274 + goto err_close;
16275 + }
16276 +
16277 + err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
16278 + DPSW_FDB_LEARNING_MODE_HW);
16279 + if (err) {
16280 + dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
16281 + goto err_close;
16282 + }
16283 +
16284 + for (i = 0; i < priv->sw_attr.num_ifs; i++) {
16285 + err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
16286 + &stp_cfg);
16287 + if (err) {
16288 + dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
16289 + err, i);
16290 + goto err_close;
16291 + }
16292 +
16293 + err = dpsw_if_set_broadcast(priv->mc_io, 0,
16294 + priv->dpsw_handle, i, 1);
16295 + if (err) {
16296 + dev_err(dev,
16297 + "dpsw_if_set_broadcast err %d for port %d\n",
16298 + err, i);
16299 + goto err_close;
16300 + }
16301 + }
16302 +
16303 + return 0;
16304 +
16305 +err_close:
16306 + dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
16307 +err_exit:
16308 + return err;
16309 +}
16310 +
16311 +static int __cold
16312 +ethsw_takedown(struct fsl_mc_device *sw_dev)
16313 +{
16314 + struct device *dev = &sw_dev->dev;
16315 + struct net_device *netdev;
16316 + struct ethsw_dev_priv *priv;
16317 + int err;
16318 +
16319 + netdev = dev_get_drvdata(dev);
16320 + priv = netdev_priv(netdev);
16321 +
16322 + err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
16323 + if (err)
16324 + dev_warn(dev, "dpsw_close err %d\n", err);
16325 +
16326 + return 0;
16327 +}
16328 +
16329 +static int __cold
16330 +ethsw_remove(struct fsl_mc_device *sw_dev)
16331 +{
16332 + struct device *dev;
16333 + struct net_device *netdev;
16334 + struct ethsw_dev_priv *priv;
16335 + struct ethsw_port_priv *port_priv;
16336 + struct list_head *pos;
16337 +
16338 + dev = &sw_dev->dev;
16339 + netdev = dev_get_drvdata(dev);
16340 + priv = netdev_priv(netdev);
16341 +
16342 + list_for_each(pos, &priv->port_list) {
16343 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
16344 +
16345 + rtnl_lock();
16346 + netdev_upper_dev_unlink(port_priv->netdev, netdev);
16347 + rtnl_unlock();
16348 +
16349 + unregister_netdev(port_priv->netdev);
16350 + free_netdev(port_priv->netdev);
16351 + }
16352 +
16353 + ethsw_teardown_irqs(sw_dev);
16354 +
16355 + unregister_netdev(netdev);
16356 +
16357 + ethsw_takedown(sw_dev);
16358 + fsl_mc_portal_free(priv->mc_io);
16359 +
16360 + dev_set_drvdata(dev, NULL);
16361 + free_netdev(netdev);
16362 +
16363 + return 0;
16364 +}
16365 +
16366 +static int __cold
16367 +ethsw_probe(struct fsl_mc_device *sw_dev)
16368 +{
16369 + struct device *dev;
16370 + struct net_device *netdev = NULL;
16371 + struct ethsw_dev_priv *priv = NULL;
16372 + int err = 0;
16373 + u16 i;
16374 + const char def_mcast[ETH_ALEN] = {
16375 + 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
16376 + };
16377 + char port_name[IFNAMSIZ];
16378 +
16379 + dev = &sw_dev->dev;
16380 +
16381 + /* register switch device, it's for management only - no I/O */
16382 + netdev = alloc_etherdev(sizeof(*priv));
16383 + if (!netdev) {
16384 + dev_err(dev, "alloc_etherdev error\n");
16385 + return -ENOMEM;
16386 + }
16387 + netdev->netdev_ops = &ethsw_ops;
16388 +
16389 + SET_NETDEV_DEV(netdev, dev);
16390 + dev_set_drvdata(dev, netdev);
16391 +
16392 + priv = netdev_priv(netdev);
16393 + priv->netdev = netdev;
16394 +
16395 + err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
16396 + if (err) {
16397 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
16398 + goto err_free_netdev;
16399 + }
16400 + if (!priv->mc_io) {
16401 + dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
16402 + err = -EFAULT;
16403 + goto err_free_netdev;
16404 + }
16405 +
16406 + err = ethsw_init(sw_dev);
16407 + if (err) {
16408 + dev_err(dev, "switch init err %d\n", err);
16409 + goto err_free_cmdport;
16410 + }
16411 +
16412 + netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
16413 +
16414 + /* TODO: should we hold rtnl_lock here? We can't register_netdev under
16415 + * lock
16416 + */
16417 + dev_alloc_name(netdev, "sw%d");
16418 + err = register_netdev(netdev);
16419 + if (err < 0) {
16420 + dev_err(dev, "register_netdev error %d\n", err);
16421 + goto err_takedown;
16422 + }
16423 + if (err)
16424 + dev_info(dev, "register_netdev res %d\n", err);
16425 +
16426 + /* VLAN 1 is implicitly configured on the switch */
16427 + priv->vlans[1] = ETHSW_VLAN_MEMBER;
16428 + /* Flooding, learning are implicitly enabled */
16429 + priv->learning = true;
16430 + priv->flood = true;
16431 +
16432 + /* register switch ports */
16433 + snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
16434 +
16435 + INIT_LIST_HEAD(&priv->port_list);
16436 + for (i = 0; i < priv->sw_attr.num_ifs; i++) {
16437 + struct net_device *port_netdev;
16438 + struct ethsw_port_priv *port_priv;
16439 +
16440 + port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
16441 + if (!port_netdev) {
16442 + dev_err(dev, "alloc_etherdev error\n");
16443 + goto err_takedown;
16444 + }
16445 +
16446 + port_priv = netdev_priv(port_netdev);
16447 + port_priv->netdev = port_netdev;
16448 + port_priv->ethsw_priv = priv;
16449 +
16450 + port_priv->port_index = i;
16451 + port_priv->stp_state = BR_STATE_FORWARDING;
16452 + /* VLAN 1 is configured by default on all switch ports */
16453 + port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
16454 + ETHSW_VLAN_PVID;
16455 +
16456 + SET_NETDEV_DEV(port_netdev, dev);
16457 + port_netdev->netdev_ops = &ethsw_port_ops;
16458 + port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
16459 +
16460 + port_netdev->flags = port_netdev->flags |
16461 + IFF_PROMISC | IFF_SLAVE;
16462 +
16463 + dev_alloc_name(port_netdev, port_name);
16464 + err = register_netdev(port_netdev);
16465 + if (err < 0) {
16466 + dev_err(dev, "register_netdev error %d\n", err);
16467 + free_netdev(port_netdev);
16468 + goto err_takedown;
16469 + }
16470 +
16471 + rtnl_lock();
16472 +
16473 + err = netdev_master_upper_dev_link(port_netdev, netdev,
16474 + NULL, NULL);
16475 + if (err) {
16476 + dev_err(dev, "netdev_master_upper_dev_link error %d\n",
16477 + err);
16478 + unregister_netdev(port_netdev);
16479 + free_netdev(port_netdev);
16480 + rtnl_unlock();
16481 + goto err_takedown;
16482 + }
16483 +
16484 + rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
16485 +
16486 + rtnl_unlock();
16487 +
16488 + list_add(&port_priv->list, &priv->port_list);
16489 +
16490 + /* TODO: implement set_rm_mode instead of this */
16491 + err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
16492 + if (err)
16493 + dev_warn(&netdev->dev,
16494 + "ethsw_port_fdb_add_mc err %d\n", err);
16495 + }
16496 +
16497 + /* the switch starts up enabled */
16498 + rtnl_lock();
16499 + err = dev_open(netdev);
16500 + rtnl_unlock();
16501 + if (err)
16502 + dev_warn(dev, "dev_open err %d\n", err);
16503 +
16504 + /* setup irqs */
16505 + err = ethsw_setup_irqs(sw_dev);
16506 + if (unlikely(err)) {
16507 + dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
16508 + goto err_takedown;
16509 + }
16510 +
16511 + dev_info(&netdev->dev,
16512 + "probed %d port switch\n", priv->sw_attr.num_ifs);
16513 + return 0;
16514 +
16515 +err_takedown:
16516 + ethsw_remove(sw_dev);
16517 +err_free_cmdport:
16518 + fsl_mc_portal_free(priv->mc_io);
16519 +err_free_netdev:
16520 + dev_set_drvdata(dev, NULL);
16521 + free_netdev(netdev);
16522 +
16523 + return err;
16524 +}
16525 +
16526 +static const struct fsl_mc_device_id ethsw_match_id_table[] = {
16527 + {
16528 + .vendor = FSL_MC_VENDOR_FREESCALE,
16529 + .obj_type = "dpsw",
16530 + },
16531 + {}
16532 +};
16533 +
16534 +static struct fsl_mc_driver eth_sw_drv = {
16535 + .driver = {
16536 + .name = KBUILD_MODNAME,
16537 + .owner = THIS_MODULE,
16538 + },
16539 + .probe = ethsw_probe,
16540 + .remove = ethsw_remove,
16541 + .match_id_table = ethsw_match_id_table,
16542 +};
16543 +
16544 +module_fsl_mc_driver(eth_sw_drv);
16545 +
16546 +MODULE_LICENSE("GPL");
16547 +MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)");
16548 diff --git a/drivers/staging/fsl-dpaa2/evb/Kconfig b/drivers/staging/fsl-dpaa2/evb/Kconfig
16549 new file mode 100644
16550 index 00000000..3534f697
16551 --- /dev/null
16552 +++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
16553 @@ -0,0 +1,7 @@
16554 +config FSL_DPAA2_EVB
16555 + tristate "DPAA2 Edge Virtual Bridge"
16556 + depends on FSL_MC_BUS && FSL_DPAA2
16557 + select VLAN_8021Q
16558 + default y
16559 + ---help---
16560 + Prototype driver for DPAA2 Edge Virtual Bridge.
16561 diff --git a/drivers/staging/fsl-dpaa2/evb/Makefile b/drivers/staging/fsl-dpaa2/evb/Makefile
16562 new file mode 100644
16563 index 00000000..ecc529d7
16564 --- /dev/null
16565 +++ b/drivers/staging/fsl-dpaa2/evb/Makefile
16566 @@ -0,0 +1,10 @@
16567 +
16568 +obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o
16569 +
16570 +dpaa2-evb-objs := evb.o dpdmux.o
16571 +
16572 +all:
16573 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
16574 +
16575 +clean:
16576 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
16577 diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
16578 new file mode 100644
16579 index 00000000..66306804
16580 --- /dev/null
16581 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
16582 @@ -0,0 +1,279 @@
16583 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
16584 + *
16585 + * Redistribution and use in source and binary forms, with or without
16586 + * modification, are permitted provided that the following conditions are met:
16587 + * * Redistributions of source code must retain the above copyright
16588 + * notice, this list of conditions and the following disclaimer.
16589 + * * Redistributions in binary form must reproduce the above copyright
16590 + * notice, this list of conditions and the following disclaimer in the
16591 + * documentation and/or other materials provided with the distribution.
16592 + * * Neither the name of the above-listed copyright holders nor the
16593 + * names of any contributors may be used to endorse or promote products
16594 + * derived from this software without specific prior written permission.
16595 + *
16596 + *
16597 + * ALTERNATIVELY, this software may be distributed under the terms of the
16598 + * GNU General Public License ("GPL") as published by the Free Software
16599 + * Foundation, either version 2 of that License or (at your option) any
16600 + * later version.
16601 + *
16602 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16603 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16604 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16605 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
16606 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16607 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16608 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
16609 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
16610 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
16611 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
16612 + * POSSIBILITY OF SUCH DAMAGE.
16613 + */
16614 +#ifndef _FSL_DPDMUX_CMD_H
16615 +#define _FSL_DPDMUX_CMD_H
16616 +
16617 +/* DPDMUX Version */
16618 +#define DPDMUX_VER_MAJOR 6
16619 +#define DPDMUX_VER_MINOR 1
16620 +
16621 +#define DPDMUX_CMD_BASE_VER 1
16622 +#define DPDMUX_CMD_ID_OFFSET 4
16623 +
16624 +#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER)
16625 +
16626 +/* Command IDs */
16627 +#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800)
16628 +#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806)
16629 +#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906)
16630 +#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986)
16631 +#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06)
16632 +
16633 +#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
16634 +#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
16635 +#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004)
16636 +#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
16637 +#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
16638 +
16639 +#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012)
16640 +#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013)
16641 +#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014)
16642 +#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015)
16643 +#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016)
16644 +#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017)
16645 +
16646 +#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
16647 +
16648 +#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3)
16649 +
16650 +#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7)
16651 +#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8)
16652 +#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9)
16653 +#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa)
16654 +
16655 +#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0)
16656 +#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1)
16657 +#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2)
16658 +#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3)
16659 +#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4)
16660 +
16661 +#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5)
16662 +#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6)
16663 +#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7)
16664 +
16665 +#define DPDMUX_MASK(field) \
16666 + GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \
16667 + DPDMUX_##field##_SHIFT)
16668 +#define dpdmux_set_field(var, field, val) \
16669 + ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field)))
16670 +#define dpdmux_get_field(var, field) \
16671 + (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT)
16672 +
16673 +struct dpdmux_cmd_open {
16674 + u32 dpdmux_id;
16675 +};
16676 +
16677 +struct dpdmux_cmd_create {
16678 + u8 method;
16679 + u8 manip;
16680 + u16 num_ifs;
16681 + u32 pad;
16682 +
16683 + u16 adv_max_dmat_entries;
16684 + u16 adv_max_mc_groups;
16685 + u16 adv_max_vlan_ids;
16686 + u16 pad1;
16687 +
16688 + u64 options;
16689 +};
16690 +
16691 +struct dpdmux_cmd_destroy {
16692 + u32 dpdmux_id;
16693 +};
16694 +
16695 +#define DPDMUX_ENABLE_SHIFT 0
16696 +#define DPDMUX_ENABLE_SIZE 1
16697 +
16698 +struct dpdmux_rsp_is_enabled {
16699 + u8 en;
16700 +};
16701 +
16702 +struct dpdmux_cmd_set_irq_enable {
16703 + u8 enable;
16704 + u8 pad[3];
16705 + u8 irq_index;
16706 +};
16707 +
16708 +struct dpdmux_cmd_get_irq_enable {
16709 + u32 pad;
16710 + u8 irq_index;
16711 +};
16712 +
16713 +struct dpdmux_rsp_get_irq_enable {
16714 + u8 enable;
16715 +};
16716 +
16717 +struct dpdmux_cmd_set_irq_mask {
16718 + u32 mask;
16719 + u8 irq_index;
16720 +};
16721 +
16722 +struct dpdmux_cmd_get_irq_mask {
16723 + u32 pad;
16724 + u8 irq_index;
16725 +};
16726 +
16727 +struct dpdmux_rsp_get_irq_mask {
16728 + u32 mask;
16729 +};
16730 +
16731 +struct dpdmux_cmd_get_irq_status {
16732 + u32 status;
16733 + u8 irq_index;
16734 +};
16735 +
16736 +struct dpdmux_rsp_get_irq_status {
16737 + u32 status;
16738 +};
16739 +
16740 +struct dpdmux_cmd_clear_irq_status {
16741 + u32 status;
16742 + u8 irq_index;
16743 +};
16744 +
16745 +struct dpdmux_rsp_get_attr {
16746 + u8 method;
16747 + u8 manip;
16748 + u16 num_ifs;
16749 + u16 mem_size;
16750 + u16 pad;
16751 +
16752 + u64 pad1;
16753 +
16754 + u32 id;
16755 + u32 pad2;
16756 +
16757 + u64 options;
16758 +};
16759 +
16760 +struct dpdmux_cmd_set_max_frame_length {
16761 + u16 max_frame_length;
16762 +};
16763 +
16764 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0
16765 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4
16766 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4
16767 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4
16768 +
16769 +struct dpdmux_cmd_if_set_accepted_frames {
16770 + u16 if_id;
16771 + u8 frames_options;
16772 +};
16773 +
16774 +struct dpdmux_cmd_if {
16775 + u16 if_id;
16776 +};
16777 +
16778 +struct dpdmux_rsp_if_get_attr {
16779 + u8 pad[3];
16780 + u8 enabled;
16781 + u8 pad1[3];
16782 + u8 accepted_frames_type;
16783 + u32 rate;
16784 +};
16785 +
16786 +struct dpdmux_cmd_if_l2_rule {
16787 + u16 if_id;
16788 + u8 mac_addr5;
16789 + u8 mac_addr4;
16790 + u8 mac_addr3;
16791 + u8 mac_addr2;
16792 + u8 mac_addr1;
16793 + u8 mac_addr0;
16794 +
16795 + u32 pad;
16796 + u16 vlan_id;
16797 +};
16798 +
16799 +struct dpdmux_cmd_if_get_counter {
16800 + u16 if_id;
16801 + u8 counter_type;
16802 +};
16803 +
16804 +struct dpdmux_rsp_if_get_counter {
16805 + u64 pad;
16806 + u64 counter;
16807 +};
16808 +
16809 +struct dpdmux_cmd_if_set_link_cfg {
16810 + u16 if_id;
16811 + u16 pad[3];
16812 +
16813 + u32 rate;
16814 + u32 pad1;
16815 +
16816 + u64 options;
16817 +};
16818 +
16819 +struct dpdmux_cmd_if_get_link_state {
16820 + u16 if_id;
16821 +};
16822 +
16823 +struct dpdmux_rsp_if_get_link_state {
16824 + u32 pad;
16825 + u8 up;
16826 + u8 pad1[3];
16827 +
16828 + u32 rate;
16829 + u32 pad2;
16830 +
16831 + u64 options;
16832 +};
16833 +
16834 +struct dpdmux_rsp_get_api_version {
16835 + u16 major;
16836 + u16 minor;
16837 +};
16838 +
16839 +struct dpdmux_set_custom_key {
16840 + u64 pad[6];
16841 + u64 key_cfg_iova;
16842 +};
16843 +
16844 +struct dpdmux_cmd_add_custom_cls_entry {
16845 + u8 pad[3];
16846 + u8 key_size;
16847 + u16 pad1;
16848 + u16 dest_if;
16849 + u64 key_iova;
16850 + u64 mask_iova;
16851 +};
16852 +
16853 +struct dpdmux_cmd_remove_custom_cls_entry {
16854 + u8 pad[3];
16855 + u8 key_size;
16856 + u32 pad1;
16857 + u64 key_iova;
16858 + u64 mask_iova;
16859 +};
16860 +
16861 +#endif /* _FSL_DPDMUX_CMD_H */
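
/*
 * Illustrative sketch only, not part of the patch: how the *_SHIFT/*_SIZE
 * pairs above are consumed through the dpdmux_set_field()/dpdmux_get_field()
 * helpers. Packing the two 4-bit fields that share the 'frames_options' byte
 * of struct dpdmux_cmd_if_set_accepted_frames would look roughly like this
 * (the numeric values are arbitrary examples that match the enum values
 * defined later in dpdmux.h):
 */
static inline u8 dpdmux_pack_frames_options_sketch(void)
{
	u8 frames_options = 0;

	/* bits 0..3: accepted frames type (1 == admit only VLAN-tagged) */
	dpdmux_set_field(frames_options, ACCEPTED_FRAMES_TYPE, 1);
	/* bits 4..7: action on unaccepted frames (0 == drop) */
	dpdmux_set_field(frames_options, UNACCEPTED_FRAMES_ACTION, 0);

	/* dpdmux_get_field() reverses the mask-and-shift to read a field back */
	return frames_options;
}
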
16862 diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux.c b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
16863 new file mode 100644
16864 index 00000000..f7a87633
16865 --- /dev/null
16866 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
16867 @@ -0,0 +1,1112 @@
16868 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
16869 + *
16870 + * Redistribution and use in source and binary forms, with or without
16871 + * modification, are permitted provided that the following conditions are met:
16872 + * * Redistributions of source code must retain the above copyright
16873 + * notice, this list of conditions and the following disclaimer.
16874 + * * Redistributions in binary form must reproduce the above copyright
16875 + * notice, this list of conditions and the following disclaimer in the
16876 + * documentation and/or other materials provided with the distribution.
16877 + * * Neither the name of the above-listed copyright holders nor the
16878 + * names of any contributors may be used to endorse or promote products
16879 + * derived from this software without specific prior written permission.
16880 + *
16881 + *
16882 + * ALTERNATIVELY, this software may be distributed under the terms of the
16883 + * GNU General Public License ("GPL") as published by the Free Software
16884 + * Foundation, either version 2 of that License or (at your option) any
16885 + * later version.
16886 + *
16887 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16888 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16889 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16890 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
16891 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16892 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16893 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
16894 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
16895 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
16896 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
16897 + * POSSIBILITY OF SUCH DAMAGE.
16898 + */
16899 +#include "../../fsl-mc/include/mc-sys.h"
16900 +#include "../../fsl-mc/include/mc-cmd.h"
16901 +#include "dpdmux.h"
16902 +#include "dpdmux-cmd.h"
16903 +
16904 +/**
16905 + * dpdmux_open() - Open a control session for the specified object
16906 + * @mc_io: Pointer to MC portal's I/O object
16907 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16908 + * @dpdmux_id: DPDMUX unique ID
16909 + * @token: Returned token; use in subsequent API calls
16910 + *
16911 + * This function can be used to open a control session for an
16912 + * already created object; an object may have been declared in
16913 + * the DPL or by calling the dpdmux_create() function.
16914 + * This function returns a unique authentication token,
16915 + * associated with the specific object ID and the specific MC
16916 + * portal; this token must be used in all subsequent commands for
16917 + * this specific object.
16918 + *
16919 + * Return: '0' on Success; Error code otherwise.
16920 + */
16921 +int dpdmux_open(struct fsl_mc_io *mc_io,
16922 + u32 cmd_flags,
16923 + int dpdmux_id,
16924 + u16 *token)
16925 +{
16926 + struct mc_command cmd = { 0 };
16927 + struct dpdmux_cmd_open *cmd_params;
16928 + int err;
16929 +
16930 + /* prepare command */
16931 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
16932 + cmd_flags,
16933 + 0);
16934 + cmd_params = (struct dpdmux_cmd_open *)cmd.params;
16935 + cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id);
16936 +
16937 + /* send command to mc*/
16938 + err = mc_send_command(mc_io, &cmd);
16939 + if (err)
16940 + return err;
16941 +
16942 + /* retrieve response parameters */
16943 + *token = mc_cmd_hdr_read_token(&cmd);
16944 +
16945 + return 0;
16946 +}
16947 +
16948 +/**
16949 + * dpdmux_close() - Close the control session of the object
16950 + * @mc_io: Pointer to MC portal's I/O object
16951 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16952 + * @token: Token of DPDMUX object
16953 + *
16954 + * After this function is called, no further operations are
16955 + * allowed on the object without opening a new control session.
16956 + *
16957 + * Return: '0' on Success; Error code otherwise.
16958 + */
16959 +int dpdmux_close(struct fsl_mc_io *mc_io,
16960 + u32 cmd_flags,
16961 + u16 token)
16962 +{
16963 + struct mc_command cmd = { 0 };
16964 +
16965 + /* prepare command */
16966 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
16967 + cmd_flags,
16968 + token);
16969 +
16970 + /* send command to mc*/
16971 + return mc_send_command(mc_io, &cmd);
16972 +}
16973 +
16974 +/**
16975 + * dpdmux_create() - Create the DPDMUX object
16976 + * @mc_io: Pointer to MC portal's I/O object
16977 + * @dprc_token: Parent container token; '0' for default container
16978 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16979 + * @cfg: Configuration structure
16980 + * @obj_id: returned object id
16981 + *
16982 + * Create the DPDMUX object, allocate required resources and
16983 + * perform required initialization.
16984 + *
16985 + * The object can be created either by declaring it in the
16986 + * DPL file, or by calling this function.
16987 + *
16988 + * The function accepts an authentication token of a parent
16989 + * container that this object should be assigned to. The token
16990 + * can be '0' so the object will be assigned to the default container.
16991 + * The newly created object can be opened with the returned
16992 + * object id and using the container's associated tokens and MC portals.
16993 + *
16994 + * Return: '0' on Success; Error code otherwise.
16995 + */
16996 +int dpdmux_create(struct fsl_mc_io *mc_io,
16997 + u16 dprc_token,
16998 + u32 cmd_flags,
16999 + const struct dpdmux_cfg *cfg,
17000 + u32 *obj_id)
17001 +{
17002 + struct mc_command cmd = { 0 };
17003 + struct dpdmux_cmd_create *cmd_params;
17004 + int err;
17005 +
17006 + /* prepare command */
17007 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
17008 + cmd_flags,
17009 + dprc_token);
17010 + cmd_params = (struct dpdmux_cmd_create *)cmd.params;
17011 + cmd_params->method = cfg->method;
17012 + cmd_params->manip = cfg->manip;
17013 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
17014 + cmd_params->adv_max_dmat_entries =
17015 + cpu_to_le16(cfg->adv.max_dmat_entries);
17016 + cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
17017 + cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
17018 + cmd_params->options = cpu_to_le64(cfg->adv.options);
17019 +
17020 + /* send command to mc*/
17021 + err = mc_send_command(mc_io, &cmd);
17022 + if (err)
17023 + return err;
17024 +
17025 + /* retrieve response parameters */
17026 + *obj_id = mc_cmd_hdr_read_token(&cmd);
17027 +
17028 + return 0;
17029 +}
17030 +
17031 +/**
17032 + * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
17033 + * @mc_io: Pointer to MC portal's I/O object
17034 + * @dprc_token: Parent container token; '0' for default container
17035 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17036 + * @object_id: The object id; it must be a valid id within the container that
17037 + * created this object;
17038 + *
17039 + * The function accepts the authentication token of the parent container that
17040 + * created the object (not the one that currently owns the object). The object
17041 + * is searched within the parent using the provided 'object_id'.
17042 + * All tokens to the object must be closed before calling destroy.
17043 + *
17044 + * Return: '0' on Success; error code otherwise.
17045 + */
17046 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
17047 + u16 dprc_token,
17048 + u32 cmd_flags,
17049 + u32 object_id)
17050 +{
17051 + struct mc_command cmd = { 0 };
17052 + struct dpdmux_cmd_destroy *cmd_params;
17053 +
17054 + /* prepare command */
17055 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
17056 + cmd_flags,
17057 + dprc_token);
17058 + cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
17059 + cmd_params->dpdmux_id = cpu_to_le32(object_id);
17060 +
17061 + /* send command to mc*/
17062 + return mc_send_command(mc_io, &cmd);
17063 +}
17064 +
17065 +/**
17066 + * dpdmux_enable() - Enable DPDMUX functionality
17067 + * @mc_io: Pointer to MC portal's I/O object
17068 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17069 + * @token: Token of DPDMUX object
17070 + *
17071 + * Return: '0' on Success; Error code otherwise.
17072 + */
17073 +int dpdmux_enable(struct fsl_mc_io *mc_io,
17074 + u32 cmd_flags,
17075 + u16 token)
17076 +{
17077 + struct mc_command cmd = { 0 };
17078 +
17079 + /* prepare command */
17080 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
17081 + cmd_flags,
17082 + token);
17083 +
17084 + /* send command to mc*/
17085 + return mc_send_command(mc_io, &cmd);
17086 +}
17087 +
17088 +/**
17089 + * dpdmux_disable() - Disable DPDMUX functionality
17090 + * @mc_io: Pointer to MC portal's I/O object
17091 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17092 + * @token: Token of DPDMUX object
17093 + *
17094 + * Return: '0' on Success; Error code otherwise.
17095 + */
17096 +int dpdmux_disable(struct fsl_mc_io *mc_io,
17097 + u32 cmd_flags,
17098 + u16 token)
17099 +{
17100 + struct mc_command cmd = { 0 };
17101 +
17102 + /* prepare command */
17103 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
17104 + cmd_flags,
17105 + token);
17106 +
17107 + /* send command to mc*/
17108 + return mc_send_command(mc_io, &cmd);
17109 +}
17110 +
17111 +/**
17112 + * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
17113 + * @mc_io: Pointer to MC portal's I/O object
17114 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17115 + * @token: Token of DPDMUX object
17116 + * @en: Returns '1' if object is enabled; '0' otherwise
17117 + *
17118 + * Return: '0' on Success; Error code otherwise.
17119 + */
17120 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
17121 + u32 cmd_flags,
17122 + u16 token,
17123 + int *en)
17124 +{
17125 + struct mc_command cmd = { 0 };
17126 + struct dpdmux_rsp_is_enabled *rsp_params;
17127 + int err;
17128 +
17129 + /* prepare command */
17130 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
17131 + cmd_flags,
17132 + token);
17133 +
17134 + /* send command to mc*/
17135 + err = mc_send_command(mc_io, &cmd);
17136 + if (err)
17137 + return err;
17138 +
17139 + /* retrieve response parameters */
17140 + rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
17141 + *en = dpdmux_get_field(rsp_params->en, ENABLE);
17142 +
17143 + return 0;
17144 +}
17145 +
17146 +/**
17147 + * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
17148 + * @mc_io: Pointer to MC portal's I/O object
17149 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17150 + * @token: Token of DPDMUX object
17151 + *
17152 + * Return: '0' on Success; Error code otherwise.
17153 + */
17154 +int dpdmux_reset(struct fsl_mc_io *mc_io,
17155 + u32 cmd_flags,
17156 + u16 token)
17157 +{
17158 + struct mc_command cmd = { 0 };
17159 +
17160 + /* prepare command */
17161 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
17162 + cmd_flags,
17163 + token);
17164 +
17165 + /* send command to mc*/
17166 + return mc_send_command(mc_io, &cmd);
17167 +}
17168 +
17169 +/**
17170 + * dpdmux_set_irq_enable() - Set overall interrupt state.
17171 + * @mc_io: Pointer to MC portal's I/O object
17172 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17173 + * @token: Token of DPDMUX object
17174 + * @irq_index: The interrupt index to configure
17175 + * @en: Interrupt state - enable = 1, disable = 0
17176 + *
17177 + * Allows GPP software to control when interrupts are generated.
17178 + * Each interrupt can have up to 32 causes. The enable/disable controls the
17179 + * overall interrupt state; if the interrupt is disabled, no cause will raise
17180 + * an interrupt.
17181 + *
17182 + * Return: '0' on Success; Error code otherwise.
17183 + */
17184 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
17185 + u32 cmd_flags,
17186 + u16 token,
17187 + u8 irq_index,
17188 + u8 en)
17189 +{
17190 + struct mc_command cmd = { 0 };
17191 + struct dpdmux_cmd_set_irq_enable *cmd_params;
17192 +
17193 + /* prepare command */
17194 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
17195 + cmd_flags,
17196 + token);
17197 + cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params;
17198 + cmd_params->enable = en;
17199 + cmd_params->irq_index = irq_index;
17200 +
17201 + /* send command to mc*/
17202 + return mc_send_command(mc_io, &cmd);
17203 +}
17204 +
17205 +/**
17206 + * dpdmux_get_irq_enable() - Get overall interrupt state.
17207 + * @mc_io: Pointer to MC portal's I/O object
17208 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17209 + * @token: Token of DPDMUX object
17210 + * @irq_index: The interrupt index to configure
17211 + * @en: Returned interrupt state - enable = 1, disable = 0
17212 + *
17213 + * Return: '0' on Success; Error code otherwise.
17214 + */
17215 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
17216 + u32 cmd_flags,
17217 + u16 token,
17218 + u8 irq_index,
17219 + u8 *en)
17220 +{
17221 + struct mc_command cmd = { 0 };
17222 + struct dpdmux_cmd_get_irq_enable *cmd_params;
17223 + struct dpdmux_rsp_get_irq_enable *rsp_params;
17224 + int err;
17225 +
17226 + /* prepare command */
17227 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
17228 + cmd_flags,
17229 + token);
17230 + cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params;
17231 + cmd_params->irq_index = irq_index;
17232 +
17233 + /* send command to mc*/
17234 + err = mc_send_command(mc_io, &cmd);
17235 + if (err)
17236 + return err;
17237 +
17238 + /* retrieve response parameters */
17239 + rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params;
17240 + *en = rsp_params->enable;
17241 +
17242 + return 0;
17243 +}
17244 +
17245 +/**
17246 + * dpdmux_set_irq_mask() - Set interrupt mask.
17247 + * @mc_io: Pointer to MC portal's I/O object
17248 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17249 + * @token: Token of DPDMUX object
17250 + * @irq_index: The interrupt index to configure
17251 + * @mask: event mask to trigger interrupt;
17252 + * each bit:
17253 + * 0 = ignore event
17254 + * 1 = consider event for asserting IRQ
17255 + *
17256 + * Every interrupt can have up to 32 causes and the interrupt model supports
17257 + * masking/unmasking each cause independently
17258 + *
17259 + * Return: '0' on Success; Error code otherwise.
17260 + */
17261 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
17262 + u32 cmd_flags,
17263 + u16 token,
17264 + u8 irq_index,
17265 + u32 mask)
17266 +{
17267 + struct mc_command cmd = { 0 };
17268 + struct dpdmux_cmd_set_irq_mask *cmd_params;
17269 +
17270 + /* prepare command */
17271 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
17272 + cmd_flags,
17273 + token);
17274 + cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params;
17275 + cmd_params->mask = cpu_to_le32(mask);
17276 + cmd_params->irq_index = irq_index;
17277 +
17278 + /* send command to mc*/
17279 + return mc_send_command(mc_io, &cmd);
17280 +}
17281 +
17282 +/**
17283 + * dpdmux_get_irq_mask() - Get interrupt mask.
17284 + * @mc_io: Pointer to MC portal's I/O object
17285 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17286 + * @token: Token of DPDMUX object
17287 + * @irq_index: The interrupt index to configure
17288 + * @mask: Returned event mask to trigger interrupt
17289 + *
17290 + * Every interrupt can have up to 32 causes and the interrupt model supports
17291 + * masking/unmasking each cause independently
17292 + *
17293 + * Return: '0' on Success; Error code otherwise.
17294 + */
17295 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
17296 + u32 cmd_flags,
17297 + u16 token,
17298 + u8 irq_index,
17299 + u32 *mask)
17300 +{
17301 + struct mc_command cmd = { 0 };
17302 + struct dpdmux_cmd_get_irq_mask *cmd_params;
17303 + struct dpdmux_rsp_get_irq_mask *rsp_params;
17304 + int err;
17305 +
17306 + /* prepare command */
17307 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
17308 + cmd_flags,
17309 + token);
17310 + cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params;
17311 + cmd_params->irq_index = irq_index;
17312 +
17313 + /* send command to mc*/
17314 + err = mc_send_command(mc_io, &cmd);
17315 + if (err)
17316 + return err;
17317 +
17318 + /* retrieve response parameters */
17319 + rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params;
17320 + *mask = le32_to_cpu(rsp_params->mask);
17321 +
17322 + return 0;
17323 +}
17324 +
17325 +/**
17326 + * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
17327 + * @mc_io: Pointer to MC portal's I/O object
17328 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17329 + * @token: Token of DPDMUX object
17330 + * @irq_index: The interrupt index to configure
17331 + * @status: Returned interrupts status - one bit per cause:
17332 + * 0 = no interrupt pending
17333 + * 1 = interrupt pending
17334 + *
17335 + * Return: '0' on Success; Error code otherwise.
17336 + */
17337 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
17338 + u32 cmd_flags,
17339 + u16 token,
17340 + u8 irq_index,
17341 + u32 *status)
17342 +{
17343 + struct mc_command cmd = { 0 };
17344 + struct dpdmux_cmd_get_irq_status *cmd_params;
17345 + struct dpdmux_rsp_get_irq_status *rsp_params;
17346 + int err;
17347 +
17348 + /* prepare command */
17349 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
17350 + cmd_flags,
17351 + token);
17352 + cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params;
17353 + cmd_params->status = cpu_to_le32(*status);
17354 + cmd_params->irq_index = irq_index;
17355 +
17356 + /* send command to mc*/
17357 + err = mc_send_command(mc_io, &cmd);
17358 + if (err)
17359 + return err;
17360 +
17361 + /* retrieve response parameters */
17362 + rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params;
17363 + *status = le32_to_cpu(rsp_params->status);
17364 +
17365 + return 0;
17366 +}
17367 +
17368 +/**
17369 + * dpdmux_clear_irq_status() - Clear a pending interrupt's status
17370 + * @mc_io: Pointer to MC portal's I/O object
17371 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17372 + * @token: Token of DPDMUX object
17373 + * @irq_index: The interrupt index to configure
17374 + * @status: bits to clear (W1C) - one bit per cause:
17375 + * 0 = don't change
17376 + * 1 = clear status bit
17377 + *
17378 + * Return: '0' on Success; Error code otherwise.
17379 + */
17380 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
17381 + u32 cmd_flags,
17382 + u16 token,
17383 + u8 irq_index,
17384 + u32 status)
17385 +{
17386 + struct mc_command cmd = { 0 };
17387 + struct dpdmux_cmd_clear_irq_status *cmd_params;
17388 +
17389 + /* prepare command */
17390 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
17391 + cmd_flags,
17392 + token);
17393 + cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params;
17394 + cmd_params->status = cpu_to_le32(status);
17395 + cmd_params->irq_index = irq_index;
17396 +
17397 + /* send command to mc*/
17398 + return mc_send_command(mc_io, &cmd);
17399 +}
17400 +
17401 +/**
17402 + * dpdmux_get_attributes() - Retrieve DPDMUX attributes
17403 + * @mc_io: Pointer to MC portal's I/O object
17404 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17405 + * @token: Token of DPDMUX object
17406 + * @attr: Returned object's attributes
17407 + *
17408 + * Return: '0' on Success; Error code otherwise.
17409 + */
17410 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
17411 + u32 cmd_flags,
17412 + u16 token,
17413 + struct dpdmux_attr *attr)
17414 +{
17415 + struct mc_command cmd = { 0 };
17416 + struct dpdmux_rsp_get_attr *rsp_params;
17417 + int err;
17418 +
17419 + /* prepare command */
17420 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
17421 + cmd_flags,
17422 + token);
17423 +
17424 + /* send command to mc*/
17425 + err = mc_send_command(mc_io, &cmd);
17426 + if (err)
17427 + return err;
17428 +
17429 + /* retrieve response parameters */
17430 + rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params;
17431 + attr->id = le32_to_cpu(rsp_params->id);
17432 + attr->options = le64_to_cpu(rsp_params->options);
17433 + attr->method = rsp_params->method;
17434 + attr->manip = rsp_params->manip;
17435 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
17436 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
17437 +
17438 + return 0;
17439 +}
17440 +
17441 +/**
17442 + * dpdmux_if_enable() - Enable Interface
17443 + * @mc_io: Pointer to MC portal's I/O object
17444 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17445 + * @token: Token of DPDMUX object
17446 + * @if_id: Interface Identifier
17447 + *
17448 + * Return: Completion status. '0' on Success; Error code otherwise.
17449 + */
17450 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
17451 + u32 cmd_flags,
17452 + u16 token,
17453 + u16 if_id)
17454 +{
17455 + struct dpdmux_cmd_if *cmd_params;
17456 + struct mc_command cmd = { 0 };
17457 +
17458 + /* prepare command */
17459 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
17460 + cmd_flags,
17461 + token);
17462 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17463 + cmd_params->if_id = cpu_to_le16(if_id);
17464 +
17465 + /* send command to mc*/
17466 + return mc_send_command(mc_io, &cmd);
17467 +}
17468 +
17469 +/**
17470 + * dpdmux_if_disable() - Disable Interface
17471 + * @mc_io: Pointer to MC portal's I/O object
17472 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17473 + * @token: Token of DPDMUX object
17474 + * @if_id: Interface Identifier
17475 + *
17476 + * Return: Completion status. '0' on Success; Error code otherwise.
17477 + */
17478 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
17479 + u32 cmd_flags,
17480 + u16 token,
17481 + u16 if_id)
17482 +{
17483 + struct dpdmux_cmd_if *cmd_params;
17484 + struct mc_command cmd = { 0 };
17485 +
17486 + /* prepare command */
17487 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
17488 + cmd_flags,
17489 + token);
17490 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17491 + cmd_params->if_id = cpu_to_le16(if_id);
17492 +
17493 + /* send command to mc*/
17494 + return mc_send_command(mc_io, &cmd);
17495 +}
17496 +
17497 +/**
17498 + * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
17499 + * @mc_io: Pointer to MC portal's I/O object
17500 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17501 + * @token: Token of DPDMUX object
17502 + * @max_frame_length: The required maximum frame length
17503 + *
17504 + * Update the maximum frame length on all DMUX interfaces.
17505 + * In VEPA mode, the maximum frame length on all DMUX interfaces
17506 + * will be updated to the minimum of the MFLs (maximum frame lengths)
17507 + * of the connected DPNIs and the current DMUX MFL.
17508 + *
17509 + * Return: '0' on Success; Error code otherwise.
17510 + */
17511 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
17512 + u32 cmd_flags,
17513 + u16 token,
17514 + u16 max_frame_length)
17515 +{
17516 + struct mc_command cmd = { 0 };
17517 + struct dpdmux_cmd_set_max_frame_length *cmd_params;
17518 +
17519 + /* prepare command */
17520 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
17521 + cmd_flags,
17522 + token);
17523 + cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
17524 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
17525 +
17526 + /* send command to mc*/
17527 + return mc_send_command(mc_io, &cmd);
17528 +}
17529 +
17530 +/**
17531 + * dpdmux_ul_reset_counters() - Function resets the uplink interface counters
17532 + * @mc_io: Pointer to MC portal's I/O object
17533 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17534 + * @token: Token of DPDMUX object
17535 + *
17536 + * Return: '0' on Success; Error code otherwise.
17537 + */
17538 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
17539 + u32 cmd_flags,
17540 + u16 token)
17541 +{
17542 + struct mc_command cmd = { 0 };
17543 +
17544 + /* prepare command */
17545 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
17546 + cmd_flags,
17547 + token);
17548 +
17549 + /* send command to mc*/
17550 + return mc_send_command(mc_io, &cmd);
17551 +}
17552 +
17553 +/**
17554 + * dpdmux_if_set_accepted_frames() - Set the accepted frame types
17555 + * @mc_io: Pointer to MC portal's I/O object
17556 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17557 + * @token: Token of DPDMUX object
17558 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
17559 + * @cfg: Frame types configuration
17560 + *
17561 + * If 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
17562 + * priority-tagged frames are discarded.
17563 + * If 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - only untagged frames or
17564 + * priority-tagged frames are accepted.
17565 + * If 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN-tagged,
17566 + * untagged and priority-tagged frames are accepted.
17567 + *
17568 + * Return: '0' on Success; Error code otherwise.
17569 + */
17570 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
17571 + u32 cmd_flags,
17572 + u16 token,
17573 + u16 if_id,
17574 + const struct dpdmux_accepted_frames *cfg)
17575 +{
17576 + struct mc_command cmd = { 0 };
17577 + struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
17578 +
17579 + /* prepare command */
17580 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
17581 + cmd_flags,
17582 + token);
17583 + cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
17584 + cmd_params->if_id = cpu_to_le16(if_id);
17585 + dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE,
17586 + cfg->type);
17587 + dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION,
17588 + cfg->unaccept_act);
17589 +
17590 + /* send command to mc*/
17591 + return mc_send_command(mc_io, &cmd);
17592 +}
17593 +
17594 +/**
17595 + * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
17596 + * @mc_io: Pointer to MC portal's I/O object
17597 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17598 + * @token: Token of DPDMUX object
17599 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
17600 + * @attr: Interface attributes
17601 + *
17602 + * Return: '0' on Success; Error code otherwise.
17603 + */
17604 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
17605 + u32 cmd_flags,
17606 + u16 token,
17607 + u16 if_id,
17608 + struct dpdmux_if_attr *attr)
17609 +{
17610 + struct mc_command cmd = { 0 };
17611 + struct dpdmux_cmd_if *cmd_params;
17612 + struct dpdmux_rsp_if_get_attr *rsp_params;
17613 + int err;
17614 +
17615 + /* prepare command */
17616 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
17617 + cmd_flags,
17618 + token);
17619 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17620 + cmd_params->if_id = cpu_to_le16(if_id);
17621 +
17622 + /* send command to mc*/
17623 + err = mc_send_command(mc_io, &cmd);
17624 + if (err)
17625 + return err;
17626 +
17627 + /* retrieve response parameters */
17628 + rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
17629 + attr->rate = le32_to_cpu(rsp_params->rate);
17630 + attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
17631 + attr->accept_frame_type =
17632 + dpdmux_get_field(rsp_params->accepted_frames_type,
17633 + ACCEPTED_FRAMES_TYPE);
17634 +
17635 + return 0;
17636 +}
17637 +
17638 +/**
17639 + * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
17640 + * @mc_io: Pointer to MC portal's I/O object
17641 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17642 + * @token: Token of DPDMUX object
17643 + * @if_id: Destination interface ID
17644 + * @rule: L2 rule
17645 + *
17646 + * Function removes an L2 rule from the DPDMUX table
17647 + * or removes an interface from an existing multicast address
17648 + *
17649 + * Return: '0' on Success; Error code otherwise.
17650 + */
17651 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
17652 + u32 cmd_flags,
17653 + u16 token,
17654 + u16 if_id,
17655 + const struct dpdmux_l2_rule *rule)
17656 +{
17657 + struct mc_command cmd = { 0 };
17658 + struct dpdmux_cmd_if_l2_rule *cmd_params;
17659 +
17660 + /* prepare command */
17661 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
17662 + cmd_flags,
17663 + token);
17664 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
17665 + cmd_params->if_id = cpu_to_le16(if_id);
17666 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
17667 + cmd_params->mac_addr5 = rule->mac_addr[5];
17668 + cmd_params->mac_addr4 = rule->mac_addr[4];
17669 + cmd_params->mac_addr3 = rule->mac_addr[3];
17670 + cmd_params->mac_addr2 = rule->mac_addr[2];
17671 + cmd_params->mac_addr1 = rule->mac_addr[1];
17672 + cmd_params->mac_addr0 = rule->mac_addr[0];
17673 +
17674 + /* send command to mc*/
17675 + return mc_send_command(mc_io, &cmd);
17676 +}
17677 +
17678 +/**
17679 + * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
17680 + * @mc_io: Pointer to MC portal's I/O object
17681 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17682 + * @token: Token of DPDMUX object
17683 + * @if_id: Destination interface ID
17684 + * @rule: L2 rule
17685 + *
17686 + * Function adds an L2 rule into the DPDMUX table
17687 + * or adds an interface to an existing multicast address
17688 + *
17689 + * Return: '0' on Success; Error code otherwise.
17690 + */
17691 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
17692 + u32 cmd_flags,
17693 + u16 token,
17694 + u16 if_id,
17695 + const struct dpdmux_l2_rule *rule)
17696 +{
17697 + struct mc_command cmd = { 0 };
17698 + struct dpdmux_cmd_if_l2_rule *cmd_params;
17699 +
17700 + /* prepare command */
17701 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
17702 + cmd_flags,
17703 + token);
17704 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
17705 + cmd_params->if_id = cpu_to_le16(if_id);
17706 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
17707 + cmd_params->mac_addr5 = rule->mac_addr[5];
17708 + cmd_params->mac_addr4 = rule->mac_addr[4];
17709 + cmd_params->mac_addr3 = rule->mac_addr[3];
17710 + cmd_params->mac_addr2 = rule->mac_addr[2];
17711 + cmd_params->mac_addr1 = rule->mac_addr[1];
17712 + cmd_params->mac_addr0 = rule->mac_addr[0];
17713 +
17714 + /* send command to mc*/
17715 + return mc_send_command(mc_io, &cmd);
17716 +}
17717 +
17718 +/**
17719 + * dpdmux_if_get_counter() - Function obtains a specific counter of an interface
17720 + * @mc_io: Pointer to MC portal's I/O object
17721 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17722 + * @token: Token of DPDMUX object
17723 + * @if_id: Interface Id
17724 + * @counter_type: counter type
17725 + * @counter: Returned specific counter information
17726 + *
17727 + * Return: '0' on Success; Error code otherwise.
17728 + */
17729 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
17730 + u32 cmd_flags,
17731 + u16 token,
17732 + u16 if_id,
17733 + enum dpdmux_counter_type counter_type,
17734 + u64 *counter)
17735 +{
17736 + struct mc_command cmd = { 0 };
17737 + struct dpdmux_cmd_if_get_counter *cmd_params;
17738 + struct dpdmux_rsp_if_get_counter *rsp_params;
17739 + int err;
17740 +
17741 + /* prepare command */
17742 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
17743 + cmd_flags,
17744 + token);
17745 + cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
17746 + cmd_params->if_id = cpu_to_le16(if_id);
17747 + cmd_params->counter_type = counter_type;
17748 +
17749 + /* send command to mc*/
17750 + err = mc_send_command(mc_io, &cmd);
17751 + if (err)
17752 + return err;
17753 +
17754 + /* retrieve response parameters */
17755 + rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
17756 + *counter = le64_to_cpu(rsp_params->counter);
17757 +
17758 + return 0;
17759 +}
17760 +
17761 +/**
17762 + * dpdmux_if_set_link_cfg() - set the link configuration.
17763 + * @mc_io: Pointer to MC portal's I/O object
17764 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17765 + * @token: Token of DPDMUX object
17766 + * @if_id: interface id
17767 + * @cfg: Link configuration
17768 + *
17769 + * Return: '0' on Success; Error code otherwise.
17770 + */
17771 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
17772 + u32 cmd_flags,
17773 + u16 token,
17774 + u16 if_id,
17775 + struct dpdmux_link_cfg *cfg)
17776 +{
17777 + struct mc_command cmd = { 0 };
17778 + struct dpdmux_cmd_if_set_link_cfg *cmd_params;
17779 +
17780 + /* prepare command */
17781 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
17782 + cmd_flags,
17783 + token);
17784 + cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
17785 + cmd_params->if_id = cpu_to_le16(if_id);
17786 + cmd_params->rate = cpu_to_le32(cfg->rate);
17787 + cmd_params->options = cpu_to_le64(cfg->options);
17788 +
17789 + /* send command to mc*/
17790 + return mc_send_command(mc_io, &cmd);
17791 +}
17792 +
17793 +/**
17794 + * dpdmux_if_get_link_state() - Return the link state
17795 + * @mc_io: Pointer to MC portal's I/O object
17796 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17797 + * @token: Token of DPDMUX object
17798 + * @if_id: interface id
17799 + * @state: link state
17800 + *
17801 + * Return: '0' on Success; Error code otherwise.
17802 + */
17803 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
17804 + u32 cmd_flags,
17805 + u16 token,
17806 + u16 if_id,
17807 + struct dpdmux_link_state *state)
17808 +{
17809 + struct mc_command cmd = { 0 };
17810 + struct dpdmux_cmd_if_get_link_state *cmd_params;
17811 + struct dpdmux_rsp_if_get_link_state *rsp_params;
17812 + int err;
17813 +
17814 + /* prepare command */
17815 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
17816 + cmd_flags,
17817 + token);
17818 + cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
17819 + cmd_params->if_id = cpu_to_le16(if_id);
17820 +
17821 + /* send command to mc*/
17822 + err = mc_send_command(mc_io, &cmd);
17823 + if (err)
17824 + return err;
17825 +
17826 + /* retrieve response parameters */
17827 + rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
17828 + state->rate = le32_to_cpu(rsp_params->rate);
17829 + state->options = le64_to_cpu(rsp_params->options);
17830 + state->up = dpdmux_get_field(rsp_params->up, ENABLE);
17831 +
17832 + return 0;
17833 +}
17834 +
17835 +/**
17836 + * dpdmux_set_custom_key() - Set a custom classification key.
17837 + *
17838 + * This API is only available for DPDMUX instance created with
17839 + * DPDMUX_METHOD_CUSTOM. This API must be called before populating the
17840 + * classification table using dpdmux_add_custom_cls_entry.
17841 + *
17842 + * Calls to dpdmux_set_custom_key remove all existing classification entries
17843 + * that may have been added previously using dpdmux_add_custom_cls_entry.
17844 + *
17845 + * @mc_io: Pointer to MC portal's I/O object
17846 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17847 + * @token: Token of DPDMUX object
17848 + * @if_id: interface id
17849 + * @key_cfg_iova: DMA address of a configuration structure set up using
17850 + * dpkg_prepare_key_cfg. Maximum key size is 24 bytes.
17851 + *
17852 + * Return: '0' on Success; Error code otherwise.
17853 + */
17854 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
17855 + u32 cmd_flags,
17856 + u16 token,
17857 + u64 key_cfg_iova)
17858 +{
17859 + struct dpdmux_set_custom_key *cmd_params;
17860 + struct mc_command cmd = { 0 };
17861 +
17862 + /* prepare command */
17863 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
17864 + cmd_flags,
17865 + token);
17866 + cmd_params = (struct dpdmux_set_custom_key *)cmd.params;
17867 + cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova);
17868 +
17869 + /* send command to mc*/
17870 + return mc_send_command(mc_io, &cmd);
17871 +}
17872 +
17873 +/**
17874 + * dpdmux_add_custom_cls_entry() - Adds a custom classification entry.
17875 + *
17876 + * This API is only available for DPDMUX instances created with
17877 + * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key
17878 + * composition rule must be set up using dpdmux_set_custom_key.
17879 + *
17880 + * @mc_io: Pointer to MC portal's I/O object
17881 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17882 + * @token: Token of DPDMUX object
17883 + * @rule: Classification rule to insert. Rules cannot be duplicated; if a
17884 + * matching rule already exists, its action will be replaced.
17885 + * @action: Action to perform for matching traffic.
17886 + *
17887 + * Return: '0' on Success; Error code otherwise.
17888 + */
17889 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
17890 + u32 cmd_flags,
17891 + u16 token,
17892 + struct dpdmux_rule_cfg *rule,
17893 + struct dpdmux_cls_action *action)
17894 +{
17895 + struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
17896 + struct mc_command cmd = { 0 };
17897 +
17898 + /* prepare command */
17899 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
17900 + cmd_flags,
17901 + token);
17902 +
17903 + cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params;
17904 + cmd_params->key_size = rule->key_size;
17905 + cmd_params->dest_if = cpu_to_le16(action->dest_if);
17906 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
17907 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
17908 +
17909 + /* send command to mc*/
17910 + return mc_send_command(mc_io, &cmd);
17911 +}
17912 +
17913 +/**
17914 + * dpdmux_remove_custom_cls_entry() - Removes a custom classification entry.
17915 + *
17916 + * This API is only available for DPDMUX instances created with
17917 + * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification
17918 + * entries previously inserted using dpdmux_add_custom_cls_entry.
17919 + *
17920 + * @mc_io: Pointer to MC portal's I/O object
17921 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17922 + * @token: Token of DPDMUX object
17923 + * @rule: Classification rule to remove
17924 + *
17925 + * Return: '0' on Success; Error code otherwise.
17926 + */
17927 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
17928 + u32 cmd_flags,
17929 + u16 token,
17930 + struct dpdmux_rule_cfg *rule)
17931 +{
17932 + struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
17933 + struct mc_command cmd = { 0 };
17934 +
17935 + /* prepare command */
17936 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
17937 + cmd_flags,
17938 + token);
17939 + cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
17940 + cmd_params->key_size = rule->key_size;
17941 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
17942 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
17943 +
17944 + /* send command to mc*/
17945 + return mc_send_command(mc_io, &cmd);
17946 +}
17947 +
17948 +/**
17949 + * dpdmux_get_api_version() - Get Data Path Demux API version
17950 + * @mc_io: Pointer to MC portal's I/O object
17951 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17952 + * @major_ver: Major version of data path demux API
17953 + * @minor_ver: Minor version of data path demux API
17954 + *
17955 + * Return: '0' on Success; Error code otherwise.
17956 + */
17957 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
17958 + u32 cmd_flags,
17959 + u16 *major_ver,
17960 + u16 *minor_ver)
17961 +{
17962 + struct mc_command cmd = { 0 };
17963 + struct dpdmux_rsp_get_api_version *rsp_params;
17964 + int err;
17965 +
17966 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
17967 + cmd_flags,
17968 + 0);
17969 +
17970 + err = mc_send_command(mc_io, &cmd);
17971 + if (err)
17972 + return err;
17973 +
17974 + rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
17975 + *major_ver = le16_to_cpu(rsp_params->major);
17976 + *minor_ver = le16_to_cpu(rsp_params->minor);
17977 +
17978 + return 0;
17979 +}
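
/*
 * Illustrative consumer sketch, not part of the patch: the token-based call
 * sequence the kernel-doc above describes, assuming the caller already has an
 * MC portal ('mc_io') and a DPDMUX object id from the DPL. The 1522-byte
 * frame length is an arbitrary example value. Note that dpdmux_close() only
 * ends the control session; the object itself stays enabled.
 */
static int dpdmux_bring_up_sketch(struct fsl_mc_io *mc_io, int dpdmux_id)
{
	struct dpdmux_attr attr;
	u16 token;
	int err;

	err = dpdmux_open(mc_io, 0, dpdmux_id, &token);
	if (err)
		return err;

	err = dpdmux_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto out_close;

	/* cap the frame length on all interfaces before enabling the mux */
	err = dpdmux_set_max_frame_length(mc_io, 0, token, 1522);
	if (err)
		goto out_close;

	err = dpdmux_enable(mc_io, 0, token);

out_close:
	dpdmux_close(mc_io, 0, token);
	return err;
}
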
17980 diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux.h b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
17981 new file mode 100644
17982 index 00000000..a6ccc7ef
17983 --- /dev/null
17984 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
17985 @@ -0,0 +1,453 @@
17986 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
17987 + *
17988 + * Redistribution and use in source and binary forms, with or without
17989 + * modification, are permitted provided that the following conditions are met:
17990 + * * Redistributions of source code must retain the above copyright
17991 + * notice, this list of conditions and the following disclaimer.
17992 + * * Redistributions in binary form must reproduce the above copyright
17993 + * notice, this list of conditions and the following disclaimer in the
17994 + * documentation and/or other materials provided with the distribution.
17995 + * * Neither the name of the above-listed copyright holders nor the
17996 + * names of any contributors may be used to endorse or promote products
17997 + * derived from this software without specific prior written permission.
17998 + *
17999 + *
18000 + * ALTERNATIVELY, this software may be distributed under the terms of the
18001 + * GNU General Public License ("GPL") as published by the Free Software
18002 + * Foundation, either version 2 of that License or (at your option) any
18003 + * later version.
18004 + *
18005 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18006 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18007 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18008 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
18009 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18010 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18011 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
18012 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
18013 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
18014 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
18015 + * POSSIBILITY OF SUCH DAMAGE.
18016 + */
18017 +#ifndef __FSL_DPDMUX_H
18018 +#define __FSL_DPDMUX_H
18019 +
18020 +struct fsl_mc_io;
18021 +
18022 +/* Data Path Demux API
18023 + * Contains API for handling DPDMUX topology and functionality
18024 + */
18025 +
18026 +int dpdmux_open(struct fsl_mc_io *mc_io,
18027 + u32 cmd_flags,
18028 + int dpdmux_id,
18029 + u16 *token);
18030 +
18031 +int dpdmux_close(struct fsl_mc_io *mc_io,
18032 + u32 cmd_flags,
18033 + u16 token);
18034 +
18035 +/**
18036 + * DPDMUX general options
18037 + */
18038 +
18039 +/**
18040 + * Enable bridging between internal interfaces
18041 + */
18042 +#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
18043 +
18044 +/**
18045 + * Mask support for classification
18046 + */
18047 +#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL
18048 +
18049 +#define DPDMUX_IRQ_INDEX_IF 0x0000
18050 +#define DPDMUX_IRQ_INDEX 0x0001
18051 +
18052 +/**
18053 + * IRQ event - Indicates that the link state changed
18054 + */
18055 +#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
18056 +
18057 +/**
18058 + * enum dpdmux_manip - DPDMUX manipulation operations
18059 + * @DPDMUX_MANIP_NONE: No manipulation on frames
18060 + * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
18061 + */
18062 +enum dpdmux_manip {
18063 + DPDMUX_MANIP_NONE = 0x0,
18064 + DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
18065 +};
18066 +
18067 +/**
18068 + * enum dpdmux_method - DPDMUX method options
18069 + * @DPDMUX_METHOD_NONE: no DPDMUX method
18070 + * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
18071 + * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
18072 + * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
18073 + * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
18074 + */
18075 +enum dpdmux_method {
18076 + DPDMUX_METHOD_NONE = 0x0,
18077 + DPDMUX_METHOD_C_VLAN_MAC = 0x1,
18078 + DPDMUX_METHOD_MAC = 0x2,
18079 + DPDMUX_METHOD_C_VLAN = 0x3,
18080 + DPDMUX_METHOD_S_VLAN = 0x4,
18081 + DPDMUX_METHOD_CUSTOM = 0x5
18082 +};
18083 +
18084 +/**
18085 + * struct dpdmux_cfg - DPDMUX configuration parameters
18086 + * @method: Defines the operation method for the DPDMUX address table
18087 + * @manip: Required manipulation operation
18088 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18089 + * @adv: Advanced parameters; default is all zeros;
18090 + * use this structure to change default settings
18091 + */
18092 +struct dpdmux_cfg {
18093 + enum dpdmux_method method;
18094 + enum dpdmux_manip manip;
18095 + u16 num_ifs;
18096 + /**
18097 + * struct adv - Advanced parameters
18098 + * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
18099 + * @max_dmat_entries: Maximum entries in DPDMUX address table
18100 + * 0 - indicates default: 64 entries per interface.
18101 + * @max_mc_groups: Number of multicast groups in DPDMUX table
18102 + * 0 - indicates default: 32 multicast groups
18103 + * @max_vlan_ids: Maximum number of VLAN IDs allowed in the system -
18104 + * relevant only when working in the mac+vlan method.
18105 + * 0 - indicates default: 16 VLAN IDs.
18106 + */
18107 + struct {
18108 + u64 options;
18109 + u16 max_dmat_entries;
18110 + u16 max_mc_groups;
18111 + u16 max_vlan_ids;
18112 + } adv;
18113 +};
18114 +
18115 +int dpdmux_create(struct fsl_mc_io *mc_io,
18116 + u16 dprc_token,
18117 + u32 cmd_flags,
18118 + const struct dpdmux_cfg *cfg,
18119 + u32 *obj_id);
18120 +
18121 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
18122 + u16 dprc_token,
18123 + u32 cmd_flags,
18124 + u32 object_id);
18125 +
18126 +int dpdmux_enable(struct fsl_mc_io *mc_io,
18127 + u32 cmd_flags,
18128 + u16 token);
18129 +
18130 +int dpdmux_disable(struct fsl_mc_io *mc_io,
18131 + u32 cmd_flags,
18132 + u16 token);
18133 +
18134 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
18135 + u32 cmd_flags,
18136 + u16 token,
18137 + int *en);
18138 +
18139 +int dpdmux_reset(struct fsl_mc_io *mc_io,
18140 + u32 cmd_flags,
18141 + u16 token);
18142 +
18143 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
18144 + u32 cmd_flags,
18145 + u16 token,
18146 + u8 irq_index,
18147 + u8 en);
18148 +
18149 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
18150 + u32 cmd_flags,
18151 + u16 token,
18152 + u8 irq_index,
18153 + u8 *en);
18154 +
18155 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
18156 + u32 cmd_flags,
18157 + u16 token,
18158 + u8 irq_index,
18159 + u32 mask);
18160 +
18161 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
18162 + u32 cmd_flags,
18163 + u16 token,
18164 + u8 irq_index,
18165 + u32 *mask);
18166 +
18167 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
18168 + u32 cmd_flags,
18169 + u16 token,
18170 + u8 irq_index,
18171 + u32 *status);
18172 +
18173 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
18174 + u32 cmd_flags,
18175 + u16 token,
18176 + u8 irq_index,
18177 + u32 status);
18178 +
18179 +/**
18180 + * struct dpdmux_attr - Structure representing DPDMUX attributes
18181 + * @id: DPDMUX object ID
18182 + * @options: Configuration options (bitmap)
18183 + * @method: DPDMUX address table method
18184 + * @manip: DPDMUX manipulation type
18185 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18186 + * @mem_size: DPDMUX frame storage memory size
18187 + */
18188 +struct dpdmux_attr {
18189 + int id;
18190 + u64 options;
18191 + enum dpdmux_method method;
18192 + enum dpdmux_manip manip;
18193 + u16 num_ifs;
18194 + u16 mem_size;
18195 +};
18196 +
18197 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
18198 + u32 cmd_flags,
18199 + u16 token,
18200 + struct dpdmux_attr *attr);
18201 +
18202 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
18203 + u32 cmd_flags,
18204 + u16 token,
18205 + u16 max_frame_length);
18206 +
18207 +/**
18208 + * enum dpdmux_counter_type - Counter types
18209 + * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
18210 + * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
18211 + * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
18212 + * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
18213 + * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
18214 + * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
18215 + * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
18216 + * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
18217 + * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
18218 + * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
18219 + * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
18220 + */
18221 +enum dpdmux_counter_type {
18222 + DPDMUX_CNT_ING_FRAME = 0x0,
18223 + DPDMUX_CNT_ING_BYTE = 0x1,
18224 + DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
18225 + DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
18226 + DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
18227 + DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
18228 + DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
18229 + DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
18230 + DPDMUX_CNT_EGR_FRAME = 0x8,
18231 + DPDMUX_CNT_EGR_BYTE = 0x9,
18232 + DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
18233 +};
18234 +
18235 +/**
18236 + * enum dpdmux_accepted_frames_type - DPDMUX frame types
18237 + * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
18238 + * priority-tagged frames
18239 + * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
18240 + * priority-tagged frames that are received on this
18241 + * interface
18242 + * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
18243 + * received on this interface are accepted
18244 + */
18245 +enum dpdmux_accepted_frames_type {
18246 + DPDMUX_ADMIT_ALL = 0,
18247 + DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
18248 + DPDMUX_ADMIT_ONLY_UNTAGGED = 2
18249 +};
18250 +
18251 +/**
18252 + * enum dpdmux_action - DPDMUX action for un-accepted frames
18253 + * @DPDMUX_ACTION_DROP: Drop un-accepted frames
18254 + * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
18255 + * control interface
18256 + */
18257 +enum dpdmux_action {
18258 + DPDMUX_ACTION_DROP = 0,
18259 + DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
18260 +};
18261 +
18262 +/**
18263 + * struct dpdmux_accepted_frames - Frame types configuration
18264 + * @type: Defines ingress accepted frames
18265 + * @unaccept_act: Defines action on frames not accepted
18266 + */
18267 +struct dpdmux_accepted_frames {
18268 + enum dpdmux_accepted_frames_type type;
18269 + enum dpdmux_action unaccept_act;
18270 +};
18271 +
18272 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
18273 + u32 cmd_flags,
18274 + u16 token,
18275 + u16 if_id,
18276 + const struct dpdmux_accepted_frames *cfg);
18277 +
18278 +/**
18279 + * struct dpdmux_if_attr - Structure representing frame types configuration
18280 + * @rate: Configured interface rate (in bits per second)
18281 + * @enabled: Indicates if interface is enabled
18282 + * @accept_frame_type: Indicates type of accepted frames for the interface
18283 + */
18284 +struct dpdmux_if_attr {
18285 + u32 rate;
18286 + int enabled;
18287 + enum dpdmux_accepted_frames_type accept_frame_type;
18288 +};
18289 +
18290 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
18291 + u32 cmd_flags,
18292 + u16 token,
18293 + u16 if_id,
18294 + struct dpdmux_if_attr *attr);
18295 +
18296 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
18297 + u32 cmd_flags,
18298 + u16 token,
18299 + u16 if_id);
18300 +
18301 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
18302 + u32 cmd_flags,
18303 + u16 token,
18304 + u16 if_id);
18305 +
18306 +/**
18307 + * struct dpdmux_l2_rule - Structure representing L2 rule
18308 + * @mac_addr: MAC address
18309 + * @vlan_id: VLAN ID
18310 + */
18311 +struct dpdmux_l2_rule {
18312 + u8 mac_addr[6];
18313 + u16 vlan_id;
18314 +};
18315 +
18316 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
18317 + u32 cmd_flags,
18318 + u16 token,
18319 + u16 if_id,
18320 + const struct dpdmux_l2_rule *rule);
18321 +
18322 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
18323 + u32 cmd_flags,
18324 + u16 token,
18325 + u16 if_id,
18326 + const struct dpdmux_l2_rule *rule);
18327 +
18328 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
18329 + u32 cmd_flags,
18330 + u16 token,
18331 + u16 if_id,
18332 + enum dpdmux_counter_type counter_type,
18333 + u64 *counter);
18334 +
18335 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
18336 + u32 cmd_flags,
18337 + u16 token);
18338 +
18339 +/**
18340 + * Enable auto-negotiation
18341 + */
18342 +#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
18343 +/**
18344 + * Enable half-duplex mode
18345 + */
18346 +#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
18347 +/**
18348 + * Enable pause frames
18349 + */
18350 +#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
18351 +/**
18352 + * Enable asymmetric pause frames
18353 + */
18354 +#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
18355 +
18356 +/**
18357 + * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
18358 + * @rate: Rate
18359 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18360 + */
18361 +struct dpdmux_link_cfg {
18362 + u32 rate;
18363 + u64 options;
18364 +};
18365 +
18366 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
18367 + u32 cmd_flags,
18368 + u16 token,
18369 + u16 if_id,
18370 + struct dpdmux_link_cfg *cfg);
18371 +/**
18372 + * struct dpdmux_link_state - Structure representing DPDMUX link state
18373 + * @rate: Rate
18374 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18375 + * @up: 0 - down, 1 - up
18376 + */
18377 +struct dpdmux_link_state {
18378 + u32 rate;
18379 + u64 options;
18380 + int up;
18381 +};
18382 +
18383 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
18384 + u32 cmd_flags,
18385 + u16 token,
18386 + u16 if_id,
18387 + struct dpdmux_link_state *state);
18388 +
18389 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
18390 + u32 cmd_flags,
18391 + u16 token,
18392 + u64 key_cfg_iova);
18393 +
18394 +/**
18395 + * struct dpdmux_rule_cfg - Custom classification rule.
18396 + *
18397 + * @key_iova: DMA address of buffer storing the look-up value
18398 + * @mask_iova: DMA address of the mask used for TCAM classification
18399 + * @key_size: size, in bytes, of the look-up value. This must match the size
18400 + * of the look-up key defined using dpdmux_set_custom_key, otherwise the
18401 + * entry will never be hit
18402 + */
18403 +struct dpdmux_rule_cfg {
18404 + u64 key_iova;
18405 + u64 mask_iova;
18406 + u8 key_size;
18407 +};
18408 +
18409 +/**
18410 + * struct dpdmux_cls_action - Action to execute for frames matching the
18411 + * classification entry
18412 + *
18413 + * @dest_if: Interface to forward the frames to. Port numbering is similar to
18414 + * the one used to connect interfaces:
18415 + * - 0 is the uplink port,
18416 + * - all others are downlink ports.
18417 + */
18418 +struct dpdmux_cls_action {
18419 + u16 dest_if;
18420 +};
18421 +
18422 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
18423 + u32 cmd_flags,
18424 + u16 token,
18425 + struct dpdmux_rule_cfg *rule,
18426 + struct dpdmux_cls_action *action);
18427 +
18428 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
18429 + u32 cmd_flags,
18430 + u16 token,
18431 + struct dpdmux_rule_cfg *rule);
18432 +
18433 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
18434 + u32 cmd_flags,
18435 + u16 *major_ver,
18436 + u16 *minor_ver);
18437 +
18438 +#endif /* __FSL_DPDMUX_H */
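
/*
 * Illustrative sketch, not part of the patch: the custom classification flow
 * documented above, assuming the DPDMUX was created with
 * DPDMUX_METHOD_CUSTOM and that 'key_cfg_iova', 'key_iova' and 'mask_iova'
 * are DMA addresses of buffers the caller has already prepared (the key
 * composition buffer via dpkg_prepare_key_cfg()). 'key_size' must match the
 * size of the composed key, as noted in struct dpdmux_rule_cfg.
 */
static int dpdmux_custom_cls_sketch(struct fsl_mc_io *mc_io, u16 token,
				    u64 key_cfg_iova, u64 key_iova,
				    u64 mask_iova, u8 key_size, u16 dest_if)
{
	struct dpdmux_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};
	struct dpdmux_cls_action action = {
		.dest_if = dest_if,	/* 0 is the uplink, others are downlinks */
	};
	int err;

	/* the key must be set before any classification entries are added */
	err = dpdmux_set_custom_key(mc_io, 0, token, key_cfg_iova);
	if (err)
		return err;

	return dpdmux_add_custom_cls_entry(mc_io, 0, token, &rule, &action);
}
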
18439 diff --git a/drivers/staging/fsl-dpaa2/evb/evb.c b/drivers/staging/fsl-dpaa2/evb/evb.c
18440 new file mode 100644
18441 index 00000000..9ee09b42
18442 --- /dev/null
18443 +++ b/drivers/staging/fsl-dpaa2/evb/evb.c
18444 @@ -0,0 +1,1350 @@
18445 +/* Copyright 2015 Freescale Semiconductor Inc.
18446 + *
18447 + * Redistribution and use in source and binary forms, with or without
18448 + * modification, are permitted provided that the following conditions are met:
18449 + * * Redistributions of source code must retain the above copyright
18450 + * notice, this list of conditions and the following disclaimer.
18451 + * * Redistributions in binary form must reproduce the above copyright
18452 + * notice, this list of conditions and the following disclaimer in the
18453 + * documentation and/or other materials provided with the distribution.
18454 + * * Neither the name of Freescale Semiconductor nor the
18455 + * names of its contributors may be used to endorse or promote products
18456 + * derived from this software without specific prior written permission.
18457 + *
18458 + *
18459 + * ALTERNATIVELY, this software may be distributed under the terms of the
18460 + * GNU General Public License ("GPL") as published by the Free Software
18461 + * Foundation, either version 2 of that License or (at your option) any
18462 + * later version.
18463 + *
18464 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18465 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18466 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18467 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
18468 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18469 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
18470 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
18471 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
18472 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
18473 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18474 + */
18475 +#include <linux/module.h>
18476 +#include <linux/msi.h>
18477 +#include <linux/netdevice.h>
18478 +#include <linux/etherdevice.h>
18479 +#include <linux/rtnetlink.h>
18480 +#include <linux/if_vlan.h>
18481 +
18482 +#include <uapi/linux/if_bridge.h>
18483 +#include <net/netlink.h>
18484 +
18485 +#include "../../fsl-mc/include/mc.h"
18486 +
18487 +#include "dpdmux.h"
18488 +#include "dpdmux-cmd.h"
18489 +
18490 +static const char evb_drv_version[] = "0.1";
18491 +
18492 +/* Minimal supported DPDMUX version */
18493 +#define DPDMUX_MIN_VER_MAJOR 6
18494 +#define DPDMUX_MIN_VER_MINOR 0
18495 +
18496 +/* IRQ index */
18497 +#define DPDMUX_MAX_IRQ_NUM 2
18498 +
18499 +/* MAX FRAME LENGTH (currently 10k) */
18500 +#define EVB_MAX_FRAME_LENGTH (10 * 1024)
18501 +/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
18502 +#define EVB_MIN_FRAME_LENGTH 68
18503 +
18504 +struct evb_port_priv {
18505 + struct net_device *netdev;
18506 + struct list_head list;
18507 + u16 port_index;
18508 + struct evb_priv *evb_priv;
18509 + u8 vlans[VLAN_VID_MASK + 1];
18510 +};
18511 +
18512 +struct evb_priv {
18513 + /* keep first */
18514 + struct evb_port_priv uplink;
18515 +
18516 + struct fsl_mc_io *mc_io;
18517 + struct list_head port_list;
18518 + struct dpdmux_attr attr;
18519 + u16 mux_handle;
18520 + int dev_id;
18521 +};
18522 +
18523 +static int _evb_port_carrier_state_sync(struct net_device *netdev)
18524 +{
18525 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18526 + struct dpdmux_link_state state;
18527 + int err;
18528 +
18529 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
18530 + port_priv->evb_priv->mux_handle,
18531 + port_priv->port_index, &state);
18532 + if (unlikely(err)) {
18533 + netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
18534 + return err;
18535 + }
18536 +
18537 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
18538 +
18539 + if (state.up)
18540 + netif_carrier_on(port_priv->netdev);
18541 + else
18542 + netif_carrier_off(port_priv->netdev);
18543 +
18544 + return 0;
18545 +}
18546 +
18547 +static int evb_port_open(struct net_device *netdev)
18548 +{
18549 + int err;
18550 +
18551 + /* FIXME: enable port when support added */
18552 +
18553 + err = _evb_port_carrier_state_sync(netdev);
18554 + if (err) {
18555 + netdev_err(netdev, "_evb_port_carrier_state_sync err %d\n",
18556 + err);
18557 + return err;
18558 + }
18559 +
18560 + return 0;
18561 +}
18562 +
18563 +static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
18564 +{
18565 + /* we don't support I/O for now, drop the frame */
18566 + dev_kfree_skb_any(skb);
18567 + return NETDEV_TX_OK;
18568 +}
18569 +
18570 +static int evb_links_state_update(struct evb_priv *priv)
18571 +{
18572 + struct evb_port_priv *port_priv;
18573 + struct list_head *pos;
18574 + int err;
18575 +
18576 + list_for_each(pos, &priv->port_list) {
18577 + port_priv = list_entry(pos, struct evb_port_priv, list);
18578 +
18579 + err = _evb_port_carrier_state_sync(port_priv->netdev);
18580 + if (err)
18581 + netdev_err(port_priv->netdev,
18582 + "_evb_port_carrier_state_sync err %d\n",
18583 + err);
18584 + }
18585 +
18586 + return 0;
18587 +}
18588 +
18589 +static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
18590 +{
18591 + return IRQ_WAKE_THREAD;
18592 +}
18593 +
18594 +static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
18595 +{
18596 + struct device *dev = (struct device *)arg;
18597 + struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
18598 + struct net_device *netdev = dev_get_drvdata(dev);
18599 + struct evb_priv *priv = netdev_priv(netdev);
18600 + struct fsl_mc_io *io = priv->mc_io;
18601 + u16 token = priv->mux_handle;
18602 + int irq_index = DPDMUX_IRQ_INDEX_IF;
18603 +
18604 + /* Mask the events and the if_id reserved bits to be cleared on read */
18605 + u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
18606 + int err;
18607 +
18608 + /* Sanity check */
18609 + if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
18610 + goto out;
18611 + if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num))
18612 + goto out;
18613 +
18614 + err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
18615 + if (unlikely(err)) {
18616 + netdev_err(netdev, "Can't get irq status (err %d)", err);
18617 + err = dpdmux_clear_irq_status(io, 0, token, irq_index,
18618 + 0xFFFFFFFF);
18619 + if (unlikely(err))
18620 + netdev_err(netdev, "Can't clear irq status (err %d)",
18621 + err);
18622 + goto out;
18623 + }
18624 +
18625 + if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
18626 + err = evb_links_state_update(priv);
18627 + if (unlikely(err))
18628 + goto out;
18629 + }
18630 +
18631 +out:
18632 + return IRQ_HANDLED;
18633 +}
18634 +
18635 +static int evb_setup_irqs(struct fsl_mc_device *evb_dev)
18636 +{
18637 + struct device *dev = &evb_dev->dev;
18638 + struct net_device *netdev = dev_get_drvdata(dev);
18639 + struct evb_priv *priv = netdev_priv(netdev);
18640 + int err = 0;
18641 + struct fsl_mc_device_irq *irq;
18642 + const int irq_index = DPDMUX_IRQ_INDEX_IF;
18643 + u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED;
18644 +
18645 + err = fsl_mc_allocate_irqs(evb_dev);
18646 + if (unlikely(err)) {
18647 + dev_err(dev, "MC irqs allocation failed\n");
18648 + return err;
18649 + }
18650 +
18651 + if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) {
18652 + err = -EINVAL;
18653 + goto free_irq;
18654 + }
18655 +
18656 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18657 + irq_index, 0);
18658 + if (unlikely(err)) {
18659 + dev_err(dev, "dpdmux_set_irq_enable err %d\n", err);
18660 + goto free_irq;
18661 + }
18662 +
18663 + irq = evb_dev->irqs[irq_index];
18664 +
18665 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
18666 + evb_irq0_handler,
18667 + _evb_irq0_handler_thread,
18668 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
18669 + dev_name(dev), dev);
18670 + if (unlikely(err)) {
18671 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
18672 + goto free_irq;
18673 + }
18674 +
18675 + err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle,
18676 + irq_index, mask);
18677 + if (unlikely(err)) {
18678 + dev_err(dev, "dpdmux_set_irq_mask(): %d", err);
18679 + goto free_devm_irq;
18680 + }
18681 +
18682 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18683 + irq_index, 1);
18684 + if (unlikely(err)) {
18685 + dev_err(dev, "dpdmux_set_irq_enable(): %d", err);
18686 + goto free_devm_irq;
18687 + }
18688 +
18689 + return 0;
18690 +
18691 +free_devm_irq:
18692 + devm_free_irq(dev, irq->msi_desc->irq, dev);
18693 +free_irq:
18694 + fsl_mc_free_irqs(evb_dev);
18695 + return err;
18696 +}
18697 +
18698 +static void evb_teardown_irqs(struct fsl_mc_device *evb_dev)
18699 +{
18700 + struct device *dev = &evb_dev->dev;
18701 + struct net_device *netdev = dev_get_drvdata(dev);
18702 + struct evb_priv *priv = netdev_priv(netdev);
18703 +
18704 + dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18705 + DPDMUX_IRQ_INDEX_IF, 0);
18706 +
18707 + devm_free_irq(dev,
18708 + evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq,
18709 + dev);
18710 + fsl_mc_free_irqs(evb_dev);
18711 +}
18712 +
18713 +static int evb_port_add_rule(struct net_device *netdev,
18714 + const unsigned char *addr, u16 vid)
18715 +{
18716 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18717 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
18718 + int err;
18719 +
18720 + if (addr)
18721 + ether_addr_copy(rule.mac_addr, addr);
18722 +
18723 + err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io,
18724 + 0,
18725 + port_priv->evb_priv->mux_handle,
18726 + port_priv->port_index, &rule);
18727 + if (unlikely(err))
18728 + netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err);
18729 + return err;
18730 +}
18731 +
18732 +static int evb_port_del_rule(struct net_device *netdev,
18733 + const unsigned char *addr, u16 vid)
18734 +{
18735 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18736 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
18737 + int err;
18738 +
18739 + if (addr)
18740 + ether_addr_copy(rule.mac_addr, addr);
18741 +
18742 + err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io,
18743 + 0,
18744 + port_priv->evb_priv->mux_handle,
18745 + port_priv->port_index, &rule);
18746 + if (unlikely(err))
18747 + netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err);
18748 + return err;
18749 +}
18750 +
18751 +static bool _lookup_address(struct net_device *netdev,
18752 + const unsigned char *addr)
18753 +{
18754 + struct netdev_hw_addr *ha;
18755 + struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ?
18756 + &netdev->uc : &netdev->mc;
18757 +
18758 + netif_addr_lock_bh(netdev);
18759 + list_for_each_entry(ha, &list->list, list) {
18760 + if (ether_addr_equal(ha->addr, addr)) {
18761 + netif_addr_unlock_bh(netdev);
18762 + return true;
18763 + }
18764 + }
18765 + netif_addr_unlock_bh(netdev);
18766 + return false;
18767 +}
18768 +
18769 +static inline int evb_port_fdb_prep(struct nlattr *tb[],
18770 + struct net_device *netdev,
18771 + const unsigned char *addr, u16 *vid,
18772 + bool del)
18773 +{
18774 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18775 + struct evb_priv *evb_priv = port_priv->evb_priv;
18776 +
18777 + *vid = 0;
18778 +
18779 + if (evb_priv->attr.method != DPDMUX_METHOD_MAC &&
18780 + evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
18781 + netdev_err(netdev,
18782 + "EVB mode does not support MAC classification\n");
18783 + return -EOPNOTSUPP;
18784 + }
18785 +
18786 + /* check if the address is configured on this port */
18787 + if (_lookup_address(netdev, addr)) {
18788 + if (!del)
18789 + return -EEXIST;
18790 + } else {
18791 + if (del)
18792 + return -ENOENT;
18793 + }
18794 +
18795 + if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
18796 + if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
18797 + netdev_err(netdev, "invalid vlan size %d\n",
18798 + nla_len(tb[NDA_VLAN]));
18799 + return -EINVAL;
18800 + }
18801 +
18802 + *vid = nla_get_u16(tb[NDA_VLAN]);
18803 +
18804 + if (!*vid || *vid >= VLAN_VID_MASK) {
18805 + netdev_err(netdev, "invalid vid value 0x%04x\n", *vid);
18806 + return -EINVAL;
18807 + }
18808 + } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
18809 + netdev_err(netdev,
18810 + "EVB mode requires explicit VLAN configuration\n");
18811 + return -EINVAL;
18812 + } else if (tb[NDA_VLAN]) {
18813 + netdev_warn(netdev, "VLAN not supported, argument ignored\n");
18814 + }
18815 +
18816 + return 0;
18817 +}
18818 +
18819 +static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
18820 + struct net_device *netdev,
18821 + const unsigned char *addr, u16 vid, u16 flags)
18822 +{
18823 + u16 _vid;
18824 + int err;
18825 +
18826 + /* TODO: add replace support when added to iproute bridge */
18827 + if (!(flags & NLM_F_REQUEST)) {
18828 + netdev_err(netdev,
18829 + "evb_port_fdb_add unexpected flags value %08x\n",
18830 + flags);
18831 + return -EINVAL;
18832 + }
18833 +
18834 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0);
18835 + if (unlikely(err))
18836 + return err;
18837 +
18838 + err = evb_port_add_rule(netdev, addr, _vid);
18839 + if (unlikely(err))
18840 + return err;
18841 +
18842 + if (is_unicast_ether_addr(addr)) {
18843 + err = dev_uc_add(netdev, addr);
18844 + if (unlikely(err)) {
18845 + netdev_err(netdev, "dev_uc_add err %d\n", err);
18846 + return err;
18847 + }
18848 + } else {
18849 + err = dev_mc_add(netdev, addr);
18850 + if (unlikely(err)) {
18851 + netdev_err(netdev, "dev_mc_add err %d\n", err);
18852 + return err;
18853 + }
18854 + }
18855 +
18856 + return 0;
18857 +}
18858 +
18859 +static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
18860 + struct net_device *netdev,
18861 + const unsigned char *addr, u16 vid)
18862 +{
18863 + u16 _vid;
18864 + int err;
18865 +
18866 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1);
18867 + if (unlikely(err))
18868 + return err;
18869 +
18870 + err = evb_port_del_rule(netdev, addr, _vid);
18871 + if (unlikely(err))
18872 + return err;
18873 +
18874 + if (is_unicast_ether_addr(addr)) {
18875 + err = dev_uc_del(netdev, addr);
18876 + if (unlikely(err)) {
18877 + netdev_err(netdev, "dev_uc_del err %d\n", err);
18878 + return err;
18879 + }
18880 + } else {
18881 + err = dev_mc_del(netdev, addr);
18882 + if (unlikely(err)) {
18883 + netdev_err(netdev, "dev_mc_del err %d\n", err);
18884 + return err;
18885 + }
18886 + }
18887 +
18888 + return 0;
18889 +}
18890 +
18891 +static int evb_change_mtu(struct net_device *netdev,
18892 + int mtu)
18893 +{
18894 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18895 + struct evb_priv *evb_priv = port_priv->evb_priv;
18896 + struct list_head *pos;
18897 + int err = 0;
18898 +
18899 + /* This operation is not permitted on downlinks */
18900 + if (port_priv->port_index > 0)
18901 + return -EPERM;
18902 +
18903 + if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
18904 + netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
18905 + mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
18906 + return -EINVAL;
18907 + }
18908 +
18909 + err = dpdmux_set_max_frame_length(evb_priv->mc_io,
18910 + 0,
18911 + evb_priv->mux_handle,
18912 + (uint16_t)mtu);
18913 +
18914 + if (unlikely(err)) {
18915 + netdev_err(netdev, "dpdmux_set_max_frame_length err %d\n",
18916 + err);
18917 + return err;
18918 + }
18919 +
18920 + /* Update the max frame length for downlinks */
18921 + list_for_each(pos, &evb_priv->port_list) {
18922 + port_priv = list_entry(pos, struct evb_port_priv, list);
18923 + port_priv->netdev->mtu = mtu;
18924 + }
18925 +
18926 + netdev->mtu = mtu;
18927 + return 0;
18928 +}
18929 +
18930 +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
18931 + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
18932 + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
18933 + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
18934 + .len = sizeof(struct bridge_vlan_info), },
18935 +};
18936 +
18937 +static int evb_setlink_af_spec(struct net_device *netdev,
18938 + struct nlattr **tb)
18939 +{
18940 + struct bridge_vlan_info *vinfo;
18941 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18942 + int err = 0;
18943 +
18944 + if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
18945 + netdev_err(netdev, "no VLAN INFO in nlmsg\n");
18946 + return -EOPNOTSUPP;
18947 + }
18948 +
18949 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
18950 +
18951 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
18952 + return -EINVAL;
18953 +
18954 + err = evb_port_add_rule(netdev, NULL, vinfo->vid);
18955 + if (unlikely(err))
18956 + return err;
18957 +
18958 + port_priv->vlans[vinfo->vid] = 1;
18959 +
18960 + return 0;
18961 +}
18962 +
18963 +static int evb_setlink(struct net_device *netdev,
18964 + struct nlmsghdr *nlh,
18965 + u16 flags)
18966 +{
18967 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18968 + struct evb_priv *evb_priv = port_priv->evb_priv;
18969 + struct nlattr *attr;
18970 + struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
18971 + IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
18972 + int err = 0;
18973 +
18974 + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
18975 + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
18976 + netdev_err(netdev,
18977 + "EVB mode does not support VLAN only classification\n");
18978 + return -EOPNOTSUPP;
18979 + }
18980 +
18981 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
18982 + if (attr) {
18983 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
18984 + ifla_br_policy);
18985 + if (unlikely(err)) {
18986 + netdev_err(netdev,
18987 + "nla_parse_nested for br_policy err %d\n",
18988 + err);
18989 + return err;
18990 + }
18991 +
18992 + err = evb_setlink_af_spec(netdev, tb);
18993 + return err;
18994 + }
18995 +
18996 + netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
18997 + return -EOPNOTSUPP;
18998 +}
18999 +
19000 +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
19001 +{
19002 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19003 + struct evb_priv *evb_priv = port_priv->evb_priv;
19004 + u8 operstate = netif_running(netdev) ?
19005 + netdev->operstate : IF_OPER_DOWN;
19006 + int iflink;
19007 + int err;
19008 +
19009 + err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
19010 + if (unlikely(err))
19011 + goto nla_put_err;
19012 + err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex);
19013 + if (unlikely(err))
19014 + goto nla_put_err;
19015 + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
19016 + if (unlikely(err))
19017 + goto nla_put_err;
19018 + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
19019 + if (unlikely(err))
19020 + goto nla_put_err;
19021 + if (netdev->addr_len) {
19022 + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
19023 + netdev->dev_addr);
19024 + if (unlikely(err))
19025 + goto nla_put_err;
19026 + }
19027 +
19028 + iflink = dev_get_iflink(netdev);
19029 + if (netdev->ifindex != iflink) {
19030 + err = nla_put_u32(skb, IFLA_LINK, iflink);
19031 + if (unlikely(err))
19032 + goto nla_put_err;
19033 + }
19034 +
19035 + return 0;
19036 +
19037 +nla_put_err:
19038 + netdev_err(netdev, "nla_put_ err %d\n", err);
19039 + return err;
19040 +}
19041 +
19042 +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev)
19043 +{
19044 + struct nlattr *nest;
19045 + int err;
19046 +
19047 + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
19048 + if (!nest) {
19049 + netdev_err(netdev, "nla_nest_start failed\n");
19050 + return -ENOMEM;
19051 + }
19052 +
19053 + err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING);
19054 + if (unlikely(err))
19055 + goto nla_put_err;
19056 + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
19057 + if (unlikely(err))
19058 + goto nla_put_err;
19059 + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
19060 + if (unlikely(err))
19061 + goto nla_put_err;
19062 + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
19063 + if (unlikely(err))
19064 + goto nla_put_err;
19065 + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
19066 + if (unlikely(err))
19067 + goto nla_put_err;
19068 + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
19069 + if (unlikely(err))
19070 + goto nla_put_err;
19071 + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
19072 + if (unlikely(err))
19073 + goto nla_put_err;
19074 + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0);
19075 + if (unlikely(err))
19076 + goto nla_put_err;
19077 + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1);
19078 + if (unlikely(err))
19079 + goto nla_put_err;
19080 + nla_nest_end(skb, nest);
19081 +
19082 + return 0;
19083 +
19084 +nla_put_err:
19085 + netdev_err(netdev, "nla_put_ err %d\n", err);
19086 + nla_nest_cancel(skb, nest);
19087 + return err;
19088 +}
19089 +
19090 +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev)
19091 +{
19092 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19093 + struct nlattr *nest;
19094 + struct bridge_vlan_info vinfo;
19095 + const u8 *vlans = port_priv->vlans;
19096 + u16 i;
19097 + int err;
19098 +
19099 + nest = nla_nest_start(skb, IFLA_AF_SPEC);
19100 + if (!nest) {
19101 + netdev_err(netdev, "nla_nest_start failed\n");
19102 + return -ENOMEM;
19103 + }
19104 +
19105 + for (i = 0; i < VLAN_VID_MASK + 1; i++) {
19106 + if (!vlans[i])
19107 + continue;
19108 +
19109 + vinfo.flags = 0;
19110 + vinfo.vid = i;
19111 +
19112 + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
19113 + sizeof(vinfo), &vinfo);
19114 + if (unlikely(err))
19115 + goto nla_put_err;
19116 + }
19117 +
19118 + nla_nest_end(skb, nest);
19119 +
19120 + return 0;
19121 +
19122 +nla_put_err:
19123 + netdev_err(netdev, "nla_put_ err %d\n", err);
19124 + nla_nest_cancel(skb, nest);
19125 + return err;
19126 +}
19127 +
19128 +static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq,
19129 + struct net_device *netdev, u32 filter_mask, int nlflags)
19130 +{
19131 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19132 + struct evb_priv *evb_priv = port_priv->evb_priv;
19133 + struct ifinfomsg *hdr;
19134 + struct nlmsghdr *nlh;
19135 + int err;
19136 +
19137 + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
19138 + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
19139 + return 0;
19140 + }
19141 +
19142 + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
19143 + if (!nlh)
19144 + return -EMSGSIZE;
19145 +
19146 + hdr = nlmsg_data(nlh);
19147 + memset(hdr, 0, sizeof(*hdr));
19148 + hdr->ifi_family = AF_BRIDGE;
19149 + hdr->ifi_type = netdev->type;
19150 + hdr->ifi_index = netdev->ifindex;
19151 + hdr->ifi_flags = dev_get_flags(netdev);
19152 +
19153 + err = __nla_put_netdev(skb, netdev);
19154 + if (unlikely(err))
19155 + goto nla_put_err;
19156 +
19157 + err = __nla_put_port(skb, netdev);
19158 + if (unlikely(err))
19159 + goto nla_put_err;
19160 +
19161 + /* Check if the VID information is requested */
19162 + if (filter_mask & RTEXT_FILTER_BRVLAN) {
19163 + err = __nla_put_vlan(skb, netdev);
19164 + if (unlikely(err))
19165 + goto nla_put_err;
19166 + }
19167 +
19168 + nlmsg_end(skb, nlh);
19169 + return skb->len;
19170 +
19171 +nla_put_err:
19172 + nlmsg_cancel(skb, nlh);
19173 + return -EMSGSIZE;
19174 +}
19175 +
19176 +static int evb_dellink(struct net_device *netdev,
19177 + struct nlmsghdr *nlh,
19178 + u16 flags)
19179 +{
19180 + struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
19181 + struct nlattr *spec;
19182 + struct bridge_vlan_info *vinfo;
19183 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19184 + int err = 0;
19185 +
19186 + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
19187 + if (!spec)
19188 + return 0;
19189 +
19190 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
19191 + if (unlikely(err))
19192 + return err;
19193 +
19194 + if (!tb[IFLA_BRIDGE_VLAN_INFO])
19195 + return -EOPNOTSUPP;
19196 +
19197 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
19198 +
19199 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
19200 + return -EINVAL;
19201 +
19202 + err = evb_port_del_rule(netdev, NULL, vinfo->vid);
19203 + if (unlikely(err)) {
19204 + netdev_err(netdev, "evb_port_del_rule err %d\n", err);
19205 + return err;
19206 + }
19207 + port_priv->vlans[vinfo->vid] = 0;
19208 +
19209 + return 0;
19210 +}
19211 +
19212 +void evb_port_get_stats(struct net_device *netdev,
19213 + struct rtnl_link_stats64 *storage)
19214 +{
19215 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19216 + u64 tmp;
19217 + int err;
19218 +
19219 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19220 + 0,
19221 + port_priv->evb_priv->mux_handle,
19222 + port_priv->port_index,
19223 + DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
19224 + if (unlikely(err))
19225 + goto error;
19226 +
19227 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19228 + 0,
19229 + port_priv->evb_priv->mux_handle,
19230 + port_priv->port_index,
19231 + DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
19232 + if (unlikely(err))
19233 + goto error;
19234 +
19235 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19236 + 0,
19237 + port_priv->evb_priv->mux_handle,
19238 + port_priv->port_index,
19239 + DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
19240 + if (unlikely(err))
19241 + goto error;
19242 +
19243 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19244 + 0,
19245 + port_priv->evb_priv->mux_handle,
19246 + port_priv->port_index,
19247 + DPDMUX_CNT_ING_FRAME_DISCARD,
19248 + &storage->rx_dropped);
19249 + if (unlikely(err)) {
19250 + storage->rx_dropped = tmp;
19251 + goto error;
19252 + }
19253 + storage->rx_dropped += tmp;
19254 +
19255 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19256 + 0,
19257 + port_priv->evb_priv->mux_handle,
19258 + port_priv->port_index,
19259 + DPDMUX_CNT_ING_MCAST_FRAME,
19260 + &storage->multicast);
19261 + if (unlikely(err))
19262 + goto error;
19263 +
19264 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19265 + 0,
19266 + port_priv->evb_priv->mux_handle,
19267 + port_priv->port_index,
19268 + DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
19269 + if (unlikely(err))
19270 + goto error;
19271 +
19272 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19273 + 0,
19274 + port_priv->evb_priv->mux_handle,
19275 + port_priv->port_index,
19276 + DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
19277 + if (unlikely(err))
19278 + goto error;
19279 +
19280 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19281 + 0,
19282 + port_priv->evb_priv->mux_handle,
19283 + port_priv->port_index,
19284 + DPDMUX_CNT_EGR_FRAME_DISCARD,
19285 + &storage->tx_dropped);
19286 + if (unlikely(err))
19287 + goto error;
19288 +
19289 + return;
19290 +
19291 +error:
19292 + netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
19293 +}
19294 +
19295 +static const struct net_device_ops evb_port_ops = {
19296 + .ndo_open = &evb_port_open,
19297 +
19298 + .ndo_start_xmit = &evb_dropframe,
19299 +
19300 + .ndo_fdb_add = &evb_port_fdb_add,
19301 + .ndo_fdb_del = &evb_port_fdb_del,
19302 +
19303 + .ndo_get_stats64 = &evb_port_get_stats,
19304 + .ndo_change_mtu = &evb_change_mtu,
19305 +};
19306 +
19307 +static void evb_get_drvinfo(struct net_device *netdev,
19308 + struct ethtool_drvinfo *drvinfo)
19309 +{
19310 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19311 + u16 version_major, version_minor;
19312 + int err;
19313 +
19314 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
19315 + strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version));
19316 +
19317 + err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0,
19318 + &version_major,
19319 + &version_minor);
19320 + if (err)
19321 + strlcpy(drvinfo->fw_version, "N/A",
19322 + sizeof(drvinfo->fw_version));
19323 + else
19324 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
19325 + "%u.%u", version_major, version_minor);
19326 +
19327 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
19328 + sizeof(drvinfo->bus_info));
19329 +}
19330 +
19331 +static int evb_get_settings(struct net_device *netdev,
19332 + struct ethtool_cmd *cmd)
19333 +{
19334 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19335 + struct dpdmux_link_state state = {0};
19336 + int err = 0;
19337 +
19338 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19339 + port_priv->evb_priv->mux_handle,
19340 + port_priv->port_index,
19341 + &state);
19342 + if (err) {
19343 + netdev_err(netdev, "ERROR %d getting link state", err);
19344 + goto out;
19345 + }
19346 +
19347 + /* At the moment, we have no way of interrogating the DPMAC
19348 + * from the DPDMUX side or there may not exist a DPMAC at all.
19349 + * Report only autoneg state, duplex and speed.
19350 + */
19351 + if (state.options & DPDMUX_LINK_OPT_AUTONEG)
19352 + cmd->autoneg = AUTONEG_ENABLE;
19353 + if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX))
19354 + cmd->duplex = DUPLEX_FULL;
19355 + ethtool_cmd_speed_set(cmd, state.rate);
19356 +
19357 +out:
19358 + return err;
19359 +}
19360 +
19361 +static int evb_set_settings(struct net_device *netdev,
19362 + struct ethtool_cmd *cmd)
19363 +{
19364 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19365 + struct dpdmux_link_state state = {0};
19366 + struct dpdmux_link_cfg cfg = {0};
19367 + int err = 0;
19368 +
19369 + netdev_dbg(netdev, "Setting link parameters...");
19370 +
19371 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19372 + port_priv->evb_priv->mux_handle,
19373 + port_priv->port_index,
19374 + &state);
19375 + if (err) {
19376 + netdev_err(netdev, "ERROR %d getting link state", err);
19377 + goto out;
19378 + }
19379 +
19380 + /* Due to a temporary MC limitation, the DPDMUX port must be down
19381 + * in order to change link settings. Let the user know about
19382 + * this limitation.
19383 + */
19384 + if (netif_running(netdev)) {
19385 + netdev_info(netdev,
19386 + "Sorry, interface must be brought down first.\n");
19387 + return -EACCES;
19388 + }
19389 +
19390 + cfg.options = state.options;
19391 + cfg.rate = ethtool_cmd_speed(cmd);
19392 + if (cmd->autoneg == AUTONEG_ENABLE)
19393 + cfg.options |= DPDMUX_LINK_OPT_AUTONEG;
19394 + else
19395 + cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG;
19396 + if (cmd->duplex == DUPLEX_HALF)
19397 + cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX;
19398 + else
19399 + cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX;
19400 +
19401 + err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0,
19402 + port_priv->evb_priv->mux_handle,
19403 + port_priv->port_index,
19404 + &cfg);
19405 + if (err)
19406 + /* ethtool will be loud enough if we return an error; no point
19407 + * in putting our own error message on the console by default
19408 + */
19409 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
19410 +
19411 +out:
19412 + return err;
19413 +}
19414 +
19415 +static struct {
19416 + enum dpdmux_counter_type id;
19417 + char name[ETH_GSTRING_LEN];
19418 +} evb_ethtool_counters[] = {
19419 + {DPDMUX_CNT_ING_FRAME, "rx frames"},
19420 + {DPDMUX_CNT_ING_BYTE, "rx bytes"},
19421 + {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"},
19422 + {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
19423 + {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
19424 + {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
19425 + {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
19426 + {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
19427 + {DPDMUX_CNT_EGR_FRAME, "tx frames"},
19428 + {DPDMUX_CNT_EGR_BYTE, "tx bytes"},
19429 + {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
19430 +};
19431 +
19432 +static int evb_ethtool_get_sset_count(struct net_device *dev, int sset)
19433 +{
19434 + switch (sset) {
19435 + case ETH_SS_STATS:
19436 + return ARRAY_SIZE(evb_ethtool_counters);
19437 + default:
19438 + return -EOPNOTSUPP;
19439 + }
19440 +}
19441 +
19442 +static void evb_ethtool_get_strings(struct net_device *netdev,
19443 + u32 stringset, u8 *data)
19444 +{
19445 + u32 i;
19446 +
19447 + switch (stringset) {
19448 + case ETH_SS_STATS:
19449 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++)
19450 + memcpy(data + i * ETH_GSTRING_LEN,
19451 + evb_ethtool_counters[i].name, ETH_GSTRING_LEN);
19452 + break;
19453 + }
19454 +}
19455 +
19456 +static void evb_ethtool_get_stats(struct net_device *netdev,
19457 + struct ethtool_stats *stats,
19458 + u64 *data)
19459 +{
19460 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19461 + u32 i;
19462 + int err;
19463 +
19464 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) {
19465 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19466 + 0,
19467 + port_priv->evb_priv->mux_handle,
19468 + port_priv->port_index,
19469 + evb_ethtool_counters[i].id,
19470 + &data[i]);
19471 + if (err)
19472 + netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n",
19473 + evb_ethtool_counters[i].name, err);
19474 + }
19475 +}
19476 +
19477 +static const struct ethtool_ops evb_port_ethtool_ops = {
19478 + .get_drvinfo = &evb_get_drvinfo,
19479 + .get_link = &ethtool_op_get_link,
19480 + .get_settings = &evb_get_settings,
19481 + .set_settings = &evb_set_settings,
19482 + .get_strings = &evb_ethtool_get_strings,
19483 + .get_ethtool_stats = &evb_ethtool_get_stats,
19484 + .get_sset_count = &evb_ethtool_get_sset_count,
19485 +};
19486 +
19487 +static int evb_open(struct net_device *netdev)
19488 +{
19489 + struct evb_priv *priv = netdev_priv(netdev);
19490 + int err = 0;
19491 +
19492 + err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle);
19493 + if (unlikely(err))
19494 + netdev_err(netdev, "dpdmux_enable err %d\n", err);
19495 +
19496 + return err;
19497 +}
19498 +
19499 +static int evb_close(struct net_device *netdev)
19500 +{
19501 + struct evb_priv *priv = netdev_priv(netdev);
19502 + int err = 0;
19503 +
19504 + err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle);
19505 + if (unlikely(err))
19506 + netdev_err(netdev, "dpdmux_disable err %d\n", err);
19507 +
19508 + return err;
19509 +}
19510 +
19511 +static const struct net_device_ops evb_ops = {
19512 + .ndo_start_xmit = &evb_dropframe,
19513 + .ndo_open = &evb_open,
19514 + .ndo_stop = &evb_close,
19515 +
19516 + .ndo_bridge_setlink = &evb_setlink,
19517 + .ndo_bridge_getlink = &evb_getlink,
19518 + .ndo_bridge_dellink = &evb_dellink,
19519 +
19520 + .ndo_get_stats64 = &evb_port_get_stats,
19521 + .ndo_change_mtu = &evb_change_mtu,
19522 +};
19523 +
19524 +static int evb_takedown(struct fsl_mc_device *evb_dev)
19525 +{
19526 + struct device *dev = &evb_dev->dev;
19527 + struct net_device *netdev = dev_get_drvdata(dev);
19528 + struct evb_priv *priv = netdev_priv(netdev);
19529 + int err;
19530 +
19531 + err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
19532 + if (unlikely(err))
19533 + dev_warn(dev, "dpdmux_close err %d\n", err);
19534 +
19535 + return 0;
19536 +}
19537 +
19538 +static int evb_init(struct fsl_mc_device *evb_dev)
19539 +{
19540 + struct device *dev = &evb_dev->dev;
19541 + struct net_device *netdev = dev_get_drvdata(dev);
19542 + struct evb_priv *priv = netdev_priv(netdev);
19543 + u16 version_major;
19544 + u16 version_minor;
19545 + int err = 0;
19546 +
19547 + priv->dev_id = evb_dev->obj_desc.id;
19548 +
19549 + err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
19550 + if (unlikely(err)) {
19551 + dev_err(dev, "dpdmux_open err %d\n", err);
19552 + goto err_exit;
19553 + }
19554 + if (!priv->mux_handle) {
19555 + dev_err(dev, "dpdmux_open returned null handle but no error\n");
19556 + err = -EFAULT;
19557 + goto err_exit;
19558 + }
19559 +
19560 + err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
19561 + &priv->attr);
19562 + if (unlikely(err)) {
19563 + dev_err(dev, "dpdmux_get_attributes err %d\n", err);
19564 + goto err_close;
19565 + }
19566 +
19567 + err = dpdmux_get_api_version(priv->mc_io, 0,
19568 + &version_major,
19569 + &version_minor);
19570 + if (unlikely(err)) {
19571 + dev_err(dev, "dpdmux_get_api_version err %d\n", err);
19572 + goto err_close;
19573 + }
19574 +
19575 + /* Minimum supported DPDMUX version check */
19576 + if (version_major < DPDMUX_MIN_VER_MAJOR ||
19577 + (version_major == DPDMUX_MIN_VER_MAJOR &&
19578 + version_minor < DPDMUX_MIN_VER_MINOR)) {
19579 + dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
19580 + version_major, version_minor,
19581 + DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
19582 + err = -ENOTSUPP;
19583 + goto err_close;
19584 + }
19585 +
19586 + err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
19587 + if (unlikely(err)) {
19588 + dev_err(dev, "dpdmux_reset err %d\n", err);
19589 + goto err_close;
19590 + }
19591 +
19592 + return 0;
19593 +
19594 +err_close:
19595 + dpdmux_close(priv->mc_io, 0, priv->mux_handle);
19596 +err_exit:
19597 + return err;
19598 +}
19599 +
19600 +static int evb_remove(struct fsl_mc_device *evb_dev)
19601 +{
19602 + struct device *dev = &evb_dev->dev;
19603 + struct net_device *netdev = dev_get_drvdata(dev);
19604 + struct evb_priv *priv = netdev_priv(netdev);
19605 + struct evb_port_priv *port_priv;
19606 + struct list_head *pos;
19607 +
19608 + list_for_each(pos, &priv->port_list) {
19609 + port_priv = list_entry(pos, struct evb_port_priv, list);
19610 +
19611 + rtnl_lock();
19612 + netdev_upper_dev_unlink(port_priv->netdev, netdev);
19613 + rtnl_unlock();
19614 +
19615 + unregister_netdev(port_priv->netdev);
19616 + free_netdev(port_priv->netdev);
19617 + }
19618 +
19619 + evb_teardown_irqs(evb_dev);
19620 +
19621 + unregister_netdev(netdev);
19622 +
19623 + evb_takedown(evb_dev);
19624 + fsl_mc_portal_free(priv->mc_io);
19625 +
19626 + dev_set_drvdata(dev, NULL);
19627 + free_netdev(netdev);
19628 +
19629 + return 0;
19630 +}
19631 +
19632 +static int evb_probe(struct fsl_mc_device *evb_dev)
19633 +{
19634 + struct device *dev;
19635 + struct evb_priv *priv = NULL;
19636 + struct net_device *netdev = NULL;
19637 + char port_name[IFNAMSIZ];
19638 + int i;
19639 + int err = 0;
19640 +
19641 + dev = &evb_dev->dev;
19642 +
19643 + /* register switch device, it's for management only - no I/O */
19644 + netdev = alloc_etherdev(sizeof(*priv));
19645 + if (!netdev) {
19646 + dev_err(dev, "alloc_etherdev error\n");
19647 + return -ENOMEM;
19648 + }
19649 + netdev->netdev_ops = &evb_ops;
19650 +
19651 + dev_set_drvdata(dev, netdev);
19652 +
19653 + priv = netdev_priv(netdev);
19654 +
19655 + err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
19656 + if (unlikely(err)) {
19657 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
19658 + goto err_free_netdev;
19659 + }
19660 + if (!priv->mc_io) {
19661 + dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
19662 + err = -EFAULT;
19663 + goto err_free_netdev;
19664 + }
19665 +
19666 + err = evb_init(evb_dev);
19667 + if (unlikely(err)) {
19668 + dev_err(dev, "evb init err %d\n", err);
19669 + goto err_free_cmdport;
19670 + }
19671 +
19672 + INIT_LIST_HEAD(&priv->port_list);
19673 + netdev->flags |= IFF_PROMISC | IFF_MASTER;
19674 +
19675 + dev_alloc_name(netdev, "evb%d");
19676 +
19677 + /* register switch ports */
19678 + snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
19679 +
19680 + /* only register downlinks? */
19681 + for (i = 0; i < priv->attr.num_ifs + 1; i++) {
19682 + struct net_device *port_netdev;
19683 + struct evb_port_priv *port_priv;
19684 +
19685 + if (i) {
19686 + port_netdev =
19687 + alloc_etherdev(sizeof(struct evb_port_priv));
19688 + if (!port_netdev) {
19689 + dev_err(dev, "alloc_etherdev error\n");
19690 + goto err_takedown;
19691 + }
19692 +
19693 + port_priv = netdev_priv(port_netdev);
19694 +
19695 + port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
19696 +
19697 + dev_alloc_name(port_netdev, port_name);
19698 + } else {
19699 + port_netdev = netdev;
19700 + port_priv = &priv->uplink;
19701 + }
19702 +
19703 + port_priv->netdev = port_netdev;
19704 + port_priv->evb_priv = priv;
19705 + port_priv->port_index = i;
19706 +
19707 + SET_NETDEV_DEV(port_netdev, dev);
19708 +
19709 + if (i) {
19710 + port_netdev->netdev_ops = &evb_port_ops;
19711 +
19712 + err = register_netdev(port_netdev);
19713 + if (err < 0) {
19714 + dev_err(dev, "register_netdev err %d\n", err);
19715 + free_netdev(port_netdev);
19716 + goto err_takedown;
19717 + }
19718 +
19719 + rtnl_lock();
19720 + err = netdev_master_upper_dev_link(port_netdev, netdev,
19721 + NULL, NULL);
19722 + if (unlikely(err)) {
19723 + dev_err(dev, "netdev_master_upper_dev_link err %d\n",
19724 + err);
19725 + unregister_netdev(port_netdev);
19726 + free_netdev(port_netdev);
19727 + rtnl_unlock();
19728 + goto err_takedown;
19729 + }
19730 + rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
19731 + IFF_SLAVE, GFP_KERNEL);
19732 + rtnl_unlock();
19733 +
19734 + list_add(&port_priv->list, &priv->port_list);
19735 + } else {
19736 + err = register_netdev(netdev);
19737 +
19738 + if (err < 0) {
19739 + dev_err(dev, "register_netdev error %d\n", err);
19740 + goto err_takedown;
19741 + }
19742 + }
19743 +
19744 + port_netdev->ethtool_ops = &evb_port_ethtool_ops;
19745 +
19746 + /* ports are up from init */
19747 + rtnl_lock();
19748 + err = dev_open(port_netdev);
19749 + rtnl_unlock();
19750 + if (unlikely(err))
19751 + dev_warn(dev, "dev_open err %d\n", err);
19752 + }
19753 +
19754 + /* setup irqs */
19755 + err = evb_setup_irqs(evb_dev);
19756 + if (unlikely(err)) {
19757 + dev_warn(dev, "evb_setup_irqs err %d\n", err);
19758 + goto err_takedown;
19759 + }
19760 +
19761 + dev_info(dev, "probed evb device with %d ports\n",
19762 + priv->attr.num_ifs);
19763 + return 0;
19764 +
19765 +err_takedown:
19766 + evb_remove(evb_dev);
19767 +err_free_cmdport:
19768 + fsl_mc_portal_free(priv->mc_io);
19769 +err_free_netdev:
19770 + return err;
19771 +}
19772 +
19773 +static const struct fsl_mc_device_id evb_match_id_table[] = {
19774 + {
19775 + .vendor = FSL_MC_VENDOR_FREESCALE,
19776 + .obj_type = "dpdmux",
19777 + },
19778 + {}
19779 +};
19780 +
19781 +static struct fsl_mc_driver evb_drv = {
19782 + .driver = {
19783 + .name = KBUILD_MODNAME,
19784 + .owner = THIS_MODULE,
19785 + },
19786 + .probe = evb_probe,
19787 + .remove = evb_remove,
19788 + .match_id_table = evb_match_id_table,
19789 +};
19790 +
19791 +module_fsl_mc_driver(evb_drv);
19792 +
19793 +MODULE_LICENSE("GPL");
19794 +MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
19795 diff --git a/drivers/staging/fsl-dpaa2/mac/Kconfig b/drivers/staging/fsl-dpaa2/mac/Kconfig
19796 new file mode 100644
19797 index 00000000..c94f7c1b
19798 --- /dev/null
19799 +++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
19800 @@ -0,0 +1,23 @@
19801 +config FSL_DPAA2_MAC
19802 + tristate "DPAA2 MAC / PHY interface"
19803 + depends on FSL_MC_BUS && FSL_DPAA2
19804 + select MDIO_BUS_MUX_MMIOREG
19805 + select FSL_XGMAC_MDIO
19806 + select FIXED_PHY
19807 + ---help---
19808 + Prototype driver for DPAA2 MAC / PHY interface object.
19809 + This driver acts as a proxy between phylib (including the PHY
19810 + drivers) and the MC firmware. It forwards link state changes
19811 + reported by phylib to the MC, and receives an interrupt from the
19812 + MC whenever a request is made to change the link state.
19813 +
19814 +
19815 +config FSL_DPAA2_MAC_NETDEVS
19816 + bool "Expose net interfaces for PHYs"
19817 + default n
19818 + depends on FSL_DPAA2_MAC
19819 + ---help---
19820 + Exposes macX net interfaces which allow direct control over MACs and
19821 + PHYs.
19822 +
19823 + Leave disabled if unsure.
19824 diff --git a/drivers/staging/fsl-dpaa2/mac/Makefile b/drivers/staging/fsl-dpaa2/mac/Makefile
19825 new file mode 100644
19826 index 00000000..bda94101
19827 --- /dev/null
19828 +++ b/drivers/staging/fsl-dpaa2/mac/Makefile
19829 @@ -0,0 +1,10 @@
19830 +
19831 +obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
19832 +
19833 +dpaa2-mac-objs := mac.o dpmac.o
19834 +
19835 +all:
19836 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
19837 +
19838 +clean:
19839 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
19840 diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
19841 new file mode 100644
19842 index 00000000..abdc3c0d
19843 --- /dev/null
19844 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
19845 @@ -0,0 +1,172 @@
19846 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
19847 + *
19848 + * Redistribution and use in source and binary forms, with or without
19849 + * modification, are permitted provided that the following conditions are met:
19850 + * * Redistributions of source code must retain the above copyright
19851 + * notice, this list of conditions and the following disclaimer.
19852 + * * Redistributions in binary form must reproduce the above copyright
19853 + * notice, this list of conditions and the following disclaimer in the
19854 + * documentation and/or other materials provided with the distribution.
19855 + * * Neither the name of the above-listed copyright holders nor the
19856 + * names of any contributors may be used to endorse or promote products
19857 + * derived from this software without specific prior written permission.
19858 + *
19859 + *
19860 + * ALTERNATIVELY, this software may be distributed under the terms of the
19861 + * GNU General Public License ("GPL") as published by the Free Software
19862 + * Foundation, either version 2 of that License or (at your option) any
19863 + * later version.
19864 + *
19865 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19866 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19867 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19868 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19869 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19870 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19871 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19872 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19873 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19874 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19875 + * POSSIBILITY OF SUCH DAMAGE.
19876 + */
19877 +#ifndef _FSL_DPMAC_CMD_H
19878 +#define _FSL_DPMAC_CMD_H
19879 +
19880 +/* DPMAC Version */
19881 +#define DPMAC_VER_MAJOR 4
19882 +#define DPMAC_VER_MINOR 2
19883 +#define DPMAC_CMD_BASE_VERSION 1
19884 +#define DPMAC_CMD_ID_OFFSET 4
19885 +
19886 +#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
19887 +
19888 +/* Command IDs */
19889 +#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
19890 +#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
19891 +#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c)
19892 +#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c)
19893 +#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
19894 +
19895 +#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
19896 +#define DPMAC_CMDID_RESET DPMAC_CMD(0x005)
19897 +
19898 +#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012)
19899 +#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013)
19900 +#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014)
19901 +#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015)
19902 +#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016)
19903 +#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017)
19904 +
19905 +#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2)
19906 +#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3)
19907 +#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
19908 +
19909 +#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5)
19910 +
19911 +/* Macros for accessing command fields smaller than 1 byte */
19912 +#define DPMAC_MASK(field) \
19913 + GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
19914 + DPMAC_##field##_SHIFT)
19915 +#define dpmac_set_field(var, field, val) \
19916 + ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
19917 +#define dpmac_get_field(var, field) \
19918 + (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
19919 +
19920 +struct dpmac_cmd_open {
19921 + u32 dpmac_id;
19922 +};
19923 +
19924 +struct dpmac_cmd_create {
19925 + u32 mac_id;
19926 +};
19927 +
19928 +struct dpmac_cmd_destroy {
19929 + u32 dpmac_id;
19930 +};
19931 +
19932 +struct dpmac_cmd_set_irq_enable {
19933 + u8 enable;
19934 + u8 pad[3];
19935 + u8 irq_index;
19936 +};
19937 +
19938 +struct dpmac_cmd_get_irq_enable {
19939 + u32 pad;
19940 + u8 irq_index;
19941 +};
19942 +
19943 +struct dpmac_rsp_get_irq_enable {
19944 + u8 enabled;
19945 +};
19946 +
19947 +struct dpmac_cmd_set_irq_mask {
19948 + u32 mask;
19949 + u8 irq_index;
19950 +};
19951 +
19952 +struct dpmac_cmd_get_irq_mask {
19953 + u32 pad;
19954 + u8 irq_index;
19955 +};
19956 +
19957 +struct dpmac_rsp_get_irq_mask {
19958 + u32 mask;
19959 +};
19960 +
19961 +struct dpmac_cmd_get_irq_status {
19962 + u32 status;
19963 + u8 irq_index;
19964 +};
19965 +
19966 +struct dpmac_rsp_get_irq_status {
19967 + u32 status;
19968 +};
19969 +
19970 +struct dpmac_cmd_clear_irq_status {
19971 + u32 status;
19972 + u8 irq_index;
19973 +};
19974 +
19975 +struct dpmac_rsp_get_attributes {
19976 + u8 eth_if;
19977 + u8 link_type;
19978 + u16 id;
19979 + u32 max_rate;
19980 +};
19981 +
19982 +struct dpmac_rsp_get_link_cfg {
19983 + u64 options;
19984 + u32 rate;
19985 +};
19986 +
19987 +#define DPMAC_STATE_SIZE 1
19988 +#define DPMAC_STATE_SHIFT 0
19989 +
19990 +struct dpmac_cmd_set_link_state {
19991 + u64 options;
19992 + u32 rate;
19993 + u32 pad;
19994 + /* only least significant bit is valid */
19995 + u8 up;
19996 +};
19997 +
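The DPMAC_STATE_* constants above describe the single valid bit of the 'up' byte and are meant to be used through the dpmac_set_field()/dpmac_get_field() helpers defined earlier in this header. A small illustrative helper (the packing done by the real dpmac_set_link_state() in dpmac.c may be written slightly differently):

static inline void example_pack_link_up(struct dpmac_cmd_set_link_state *cmd_params,
					int link_up)
{
	/* illustrative sketch: only bit 0 (STATE) of 'up' is valid */
	dpmac_set_field(cmd_params->up, STATE, !!link_up);
}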
19998 +struct dpmac_cmd_get_counter {
19999 + u8 type;
20000 +};
20001 +
20002 +struct dpmac_rsp_get_counter {
20003 + u64 pad;
20004 + u64 counter;
20005 +};
20006 +
20007 +struct dpmac_rsp_get_api_version {
20008 + u16 major;
20009 + u16 minor;
20010 +};
20011 +
20012 +struct dpmac_cmd_set_port_mac_addr {
20013 + u8 pad[2];
20014 + u8 addr[6];
20015 +};
20016 +
20017 +#endif /* _FSL_DPMAC_CMD_H */
20018 diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.c b/drivers/staging/fsl-dpaa2/mac/dpmac.c
20019 new file mode 100644
20020 index 00000000..f7827423
20021 --- /dev/null
20022 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
20023 @@ -0,0 +1,620 @@
20024 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
20025 + *
20026 + * Redistribution and use in source and binary forms, with or without
20027 + * modification, are permitted provided that the following conditions are met:
20028 + * * Redistributions of source code must retain the above copyright
20029 + * notice, this list of conditions and the following disclaimer.
20030 + * * Redistributions in binary form must reproduce the above copyright
20031 + * notice, this list of conditions and the following disclaimer in the
20032 + * documentation and/or other materials provided with the distribution.
20033 + * * Neither the name of the above-listed copyright holders nor the
20034 + * names of any contributors may be used to endorse or promote products
20035 + * derived from this software without specific prior written permission.
20036 + *
20037 + *
20038 + * ALTERNATIVELY, this software may be distributed under the terms of the
20039 + * GNU General Public License ("GPL") as published by the Free Software
20040 + * Foundation, either version 2 of that License or (at your option) any
20041 + * later version.
20042 + *
20043 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20044 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20045 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20046 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
20047 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20048 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20049 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20050 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20051 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20052 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
20053 + * POSSIBILITY OF SUCH DAMAGE.
20054 + */
20055 +#include "../../fsl-mc/include/mc-sys.h"
20056 +#include "../../fsl-mc/include/mc-cmd.h"
20057 +#include "dpmac.h"
20058 +#include "dpmac-cmd.h"
20059 +
20060 +/**
20061 + * dpmac_open() - Open a control session for the specified object.
20062 + * @mc_io: Pointer to MC portal's I/O object
20063 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20064 + * @dpmac_id: DPMAC unique ID
20065 + * @token: Returned token; use in subsequent API calls
20066 + *
20067 + * This function can be used to open a control session for an
20068 + * already created object; an object may have been declared in
20069 + * the DPL or by calling the dpmac_create function.
20070 + * This function returns a unique authentication token,
20071 + * associated with the specific object ID and the specific MC
20072 + * portal; this token must be used in all subsequent commands for
20073 + * this specific object
20074 + *
20075 + * Return: '0' on Success; Error code otherwise.
20076 + */
20077 +int dpmac_open(struct fsl_mc_io *mc_io,
20078 + u32 cmd_flags,
20079 + int dpmac_id,
20080 + u16 *token)
20081 +{
20082 + struct dpmac_cmd_open *cmd_params;
20083 + struct mc_command cmd = { 0 };
20084 + int err;
20085 +
20086 + /* prepare command */
20087 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
20088 + cmd_flags,
20089 + 0);
20090 + cmd_params = (struct dpmac_cmd_open *)cmd.params;
20091 + cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
20092 +
20093 + /* send command to mc*/
20094 + err = mc_send_command(mc_io, &cmd);
20095 + if (err)
20096 + return err;
20097 +
20098 + /* retrieve response parameters */
20099 + *token = mc_cmd_hdr_read_token(&cmd);
20100 +
20101 + return err;
20102 +}
20103 +
20104 +/**
20105 + * dpmac_close() - Close the control session of the object
20106 + * @mc_io: Pointer to MC portal's I/O object
20107 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20108 + * @token: Token of DPMAC object
20109 + *
20110 + * After this function is called, no further operations are
20111 + * allowed on the object without opening a new control session.
20112 + *
20113 + * Return: '0' on Success; Error code otherwise.
20114 + */
20115 +int dpmac_close(struct fsl_mc_io *mc_io,
20116 + u32 cmd_flags,
20117 + u16 token)
20118 +{
20119 + struct mc_command cmd = { 0 };
20120 +
20121 + /* prepare command */
20122 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
20123 + token);
20124 +
20125 + /* send command to mc*/
20126 + return mc_send_command(mc_io, &cmd);
20127 +}
20128 +
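As the comment blocks above describe, every other DPMAC command needs the token returned by dpmac_open(), and the control session should be released with dpmac_close() when done. A minimal usage sketch, assuming `mc_io` was obtained from fsl_mc_portal_allocate() as in the EVB probe path (the helper name is illustrative):

static int example_with_dpmac(struct fsl_mc_io *mc_io, int dpmac_id)
{
	/* illustrative sketch of the open -> use -> close pattern */
	u16 token;
	int err;

	err = dpmac_open(mc_io, 0, dpmac_id, &token);
	if (err)
		return err;

	/* ... issue further dpmac_*() commands using 'token' here ... */

	return dpmac_close(mc_io, 0, token);
}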
20129 +/**
20130 + * dpmac_create() - Create the DPMAC object.
20131 + * @mc_io: Pointer to MC portal's I/O object
20132 + * @dprc_token: Parent container token; '0' for default container
20133 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20134 + * @cfg: Configuration structure
20135 + * @obj_id: Returned object id
20136 + *
20137 + * Create the DPMAC object, allocate required resources and
20138 + * perform required initialization.
20139 + *
20140 + * The function accepts an authentication token of a parent
20141 + * container that this object should be assigned to. The token
20142 + * can be '0' so the object will be assigned to the default container.
20143 + * The newly created object can be opened with the returned
20144 + * object id and using the container's associated tokens and MC portals.
20145 + *
20146 + * Return: '0' on Success; Error code otherwise.
20147 + */
20148 +int dpmac_create(struct fsl_mc_io *mc_io,
20149 + u16 dprc_token,
20150 + u32 cmd_flags,
20151 + const struct dpmac_cfg *cfg,
20152 + u32 *obj_id)
20153 +{
20154 + struct dpmac_cmd_create *cmd_params;
20155 + struct mc_command cmd = { 0 };
20156 + int err;
20157 +
20158 + /* prepare command */
20159 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
20160 + cmd_flags,
20161 + dprc_token);
20162 + cmd_params = (struct dpmac_cmd_create *)cmd.params;
20163 + cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
20164 +
20165 + /* send command to mc*/
20166 + err = mc_send_command(mc_io, &cmd);
20167 + if (err)
20168 + return err;
20169 +
20170 + /* retrieve response parameters */
20171 + *obj_id = mc_cmd_read_object_id(&cmd);
20172 +
20173 + return 0;
20174 +}
20175 +
20176 +/**
20177 + * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
20178 + * @mc_io: Pointer to MC portal's I/O object
20179 + * @dprc_token: Parent container token; '0' for default container
20180 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20181 + * @object_id: The object id; it must be a valid id within the container that
20182 + * created this object;
20183 + *
20184 + * The function accepts the authentication token of the parent container that
20185 + * created the object (not the one that currently owns the object). The object
20186 + * is searched within parent using the provided 'object_id'.
20187 + * All tokens to the object must be closed before calling destroy.
20188 + *
20189 + * Return: '0' on Success; error code otherwise.
20190 + */
20191 +int dpmac_destroy(struct fsl_mc_io *mc_io,
20192 + u16 dprc_token,
20193 + u32 cmd_flags,
20194 + u32 object_id)
20195 +{
20196 + struct dpmac_cmd_destroy *cmd_params;
20197 + struct mc_command cmd = { 0 };
20198 +
20199 + /* prepare command */
20200 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
20201 + cmd_flags,
20202 + dprc_token);
20203 + cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
20204 + cmd_params->dpmac_id = cpu_to_le32(object_id);
20205 +
20206 + /* send command to mc*/
20207 + return mc_send_command(mc_io, &cmd);
20208 +}
20209 +
20210 +/**
20211 + * dpmac_set_irq_enable() - Set overall interrupt state.
20212 + * @mc_io: Pointer to MC portal's I/O object
20213 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20214 + * @token: Token of DPMAC object
20215 + * @irq_index: The interrupt index to configure
20216 + * @en: Interrupt state - enable = 1, disable = 0
20217 + *
20218 + * Allows GPP software to control when interrupts are generated.
20219 + * Each interrupt can have up to 32 causes. The enable/disable controls the
20220 + * overall interrupt state. If the interrupt is disabled, no causes will
20221 + * assert an interrupt.
20222 + *
20223 + * Return: '0' on Success; Error code otherwise.
20224 + */
20225 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
20226 + u32 cmd_flags,
20227 + u16 token,
20228 + u8 irq_index,
20229 + u8 en)
20230 +{
20231 + struct dpmac_cmd_set_irq_enable *cmd_params;
20232 + struct mc_command cmd = { 0 };
20233 +
20234 + /* prepare command */
20235 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
20236 + cmd_flags,
20237 + token);
20238 + cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params;
20239 + cmd_params->irq_index = irq_index;
20240 + cmd_params->enable = en;
20241 +
20242 + /* send command to mc*/
20243 + return mc_send_command(mc_io, &cmd);
20244 +}
20245 +
20246 +/**
20247 + * dpmac_get_irq_enable() - Get overall interrupt state
20248 + * @mc_io: Pointer to MC portal's I/O object
20249 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20250 + * @token: Token of DPMAC object
20251 + * @irq_index: The interrupt index to configure
20252 + * @en: Returned interrupt state - enable = 1, disable = 0
20253 + *
20254 + * Return: '0' on Success; Error code otherwise.
20255 + */
20256 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
20257 + u32 cmd_flags,
20258 + u16 token,
20259 + u8 irq_index,
20260 + u8 *en)
20261 +{
20262 + struct dpmac_cmd_get_irq_enable *cmd_params;
20263 + struct dpmac_rsp_get_irq_enable *rsp_params;
20264 + struct mc_command cmd = { 0 };
20265 + int err;
20266 +
20267 + /* prepare command */
20268 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
20269 + cmd_flags,
20270 + token);
20271 + cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params;
20272 + cmd_params->irq_index = irq_index;
20273 +
20274 + /* send command to mc*/
20275 + err = mc_send_command(mc_io, &cmd);
20276 + if (err)
20277 + return err;
20278 +
20279 + /* retrieve response parameters */
20280 + rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params;
20281 + *en = rsp_params->enabled;
20282 +
20283 + return 0;
20284 +}
20285 +
20286 +/**
20287 + * dpmac_set_irq_mask() - Set interrupt mask.
20288 + * @mc_io: Pointer to MC portal's I/O object
20289 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20290 + * @token: Token of DPMAC object
20291 + * @irq_index: The interrupt index to configure
20292 + * @mask: Event mask to trigger interrupt;
20293 + * each bit:
20294 + * 0 = ignore event
20295 + * 1 = consider event for asserting IRQ
20296 + *
20297 + * Every interrupt can have up to 32 causes and the interrupt model supports
20298 + * masking/unmasking each cause independently
20299 + *
20300 + * Return: '0' on Success; Error code otherwise.
20301 + */
20302 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
20303 + u32 cmd_flags,
20304 + u16 token,
20305 + u8 irq_index,
20306 + u32 mask)
20307 +{
20308 + struct dpmac_cmd_set_irq_mask *cmd_params;
20309 + struct mc_command cmd = { 0 };
20310 +
20311 + /* prepare command */
20312 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
20313 + cmd_flags,
20314 + token);
20315 + cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params;
20316 + cmd_params->mask = cpu_to_le32(mask);
20317 + cmd_params->irq_index = irq_index;
20318 +
20319 + /* send command to mc*/
20320 + return mc_send_command(mc_io, &cmd);
20321 +}
20322 +
20323 +/**
20324 + * dpmac_get_irq_mask() - Get interrupt mask.
20325 + * @mc_io: Pointer to MC portal's I/O object
20326 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20327 + * @token: Token of DPMAC object
20328 + * @irq_index: The interrupt index to configure
20329 + * @mask: Returned event mask to trigger interrupt
20330 + *
20331 + * Every interrupt can have up to 32 causes and the interrupt model supports
20332 + * masking/unmasking each cause independently
20333 + *
20334 + * Return: '0' on Success; Error code otherwise.
20335 + */
20336 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
20337 + u32 cmd_flags,
20338 + u16 token,
20339 + u8 irq_index,
20340 + u32 *mask)
20341 +{
20342 + struct dpmac_cmd_get_irq_mask *cmd_params;
20343 + struct dpmac_rsp_get_irq_mask *rsp_params;
20344 + struct mc_command cmd = { 0 };
20345 + int err;
20346 +
20347 + /* prepare command */
20348 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
20349 + cmd_flags,
20350 + token);
20351 + cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params;
20352 + cmd_params->irq_index = irq_index;
20353 +
20354 + /* send command to mc*/
20355 + err = mc_send_command(mc_io, &cmd);
20356 + if (err)
20357 + return err;
20358 +
20359 + /* retrieve response parameters */
20360 + rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params;
20361 + *mask = le32_to_cpu(rsp_params->mask);
20362 +
20363 + return 0;
20364 +}
20365 +
20366 +/**
20367 + * dpmac_get_irq_status() - Get the current status of any pending interrupts.
20368 + *
20369 + * @mc_io: Pointer to MC portal's I/O object
20370 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20371 + * @token: Token of DPMAC object
20372 + * @irq_index: The interrupt index to configure
20373 + * @status: Returned interrupts status - one bit per cause:
20374 + * 0 = no interrupt pending
20375 + * 1 = interrupt pending
20376 + *
20377 + * Return: '0' on Success; Error code otherwise.
20378 + */
20379 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
20380 + u32 cmd_flags,
20381 + u16 token,
20382 + u8 irq_index,
20383 + u32 *status)
20384 +{
20385 + struct dpmac_cmd_get_irq_status *cmd_params;
20386 + struct dpmac_rsp_get_irq_status *rsp_params;
20387 + struct mc_command cmd = { 0 };
20388 + int err;
20389 +
20390 + /* prepare command */
20391 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
20392 + cmd_flags,
20393 + token);
20394 + cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params;
20395 + cmd_params->status = cpu_to_le32(*status);
20396 + cmd_params->irq_index = irq_index;
20397 +
20398 + /* send command to mc*/
20399 + err = mc_send_command(mc_io, &cmd);
20400 + if (err)
20401 + return err;
20402 +
20403 + /* retrieve response parameters */
20404 + rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params;
20405 + *status = le32_to_cpu(rsp_params->status);
20406 +
20407 + return 0;
20408 +}
20409 +
20410 +/**
20411 + * dpmac_clear_irq_status() - Clear a pending interrupt's status
20412 + *
20413 + * @mc_io: Pointer to MC portal's I/O object
20414 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20415 + * @token: Token of DPMAC object
20416 + * @irq_index: The interrupt index to configure
20417 + * @status: Bits to clear (W1C) - one bit per cause:
20418 + * 0 = don't change
20419 + * 1 = clear status bit
20420 + *
20421 + * Return: '0' on Success; Error code otherwise.
20422 + */
20423 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
20424 + u32 cmd_flags,
20425 + u16 token,
20426 + u8 irq_index,
20427 + u32 status)
20428 +{
20429 + struct dpmac_cmd_clear_irq_status *cmd_params;
20430 + struct mc_command cmd = { 0 };
20431 +
20432 + /* prepare command */
20433 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
20434 + cmd_flags,
20435 + token);
20436 + cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params;
20437 + cmd_params->status = cpu_to_le32(status);
20438 + cmd_params->irq_index = irq_index;
20439 +
20440 + /* send command to mc*/
20441 + return mc_send_command(mc_io, &cmd);
20442 +}
20443 +
20444 +/**
20445 + * dpmac_get_attributes - Retrieve DPMAC attributes.
20446 + *
20447 + * @mc_io: Pointer to MC portal's I/O object
20448 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20449 + * @token: Token of DPMAC object
20450 + * @attr: Returned object's attributes
20451 + *
20452 + * Return: '0' on Success; Error code otherwise.
20453 + */
20454 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
20455 + u32 cmd_flags,
20456 + u16 token,
20457 + struct dpmac_attr *attr)
20458 +{
20459 + struct dpmac_rsp_get_attributes *rsp_params;
20460 + struct mc_command cmd = { 0 };
20461 + int err;
20462 +
20463 + /* prepare command */
20464 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
20465 + cmd_flags,
20466 + token);
20467 +
20468 + /* send command to mc*/
20469 + err = mc_send_command(mc_io, &cmd);
20470 + if (err)
20471 + return err;
20472 +
20473 + /* retrieve response parameters */
20474 + rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
20475 + attr->eth_if = rsp_params->eth_if;
20476 + attr->link_type = rsp_params->link_type;
20477 + attr->id = le16_to_cpu(rsp_params->id);
20478 + attr->max_rate = le32_to_cpu(rsp_params->max_rate);
20479 +
20480 + return 0;
20481 +}
20482 +
20483 +/**
20484 + * dpmac_get_link_cfg() - Get Ethernet link configuration
20485 + * @mc_io: Pointer to opaque I/O object
20486 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20487 + * @token: Token of DPMAC object
20488 + * @cfg: Returned structure with the link configuration
20489 + *
20490 + * Return: '0' on Success; Error code otherwise.
20491 + */
20492 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
20493 + u32 cmd_flags,
20494 + u16 token,
20495 + struct dpmac_link_cfg *cfg)
20496 +{
20497 + struct dpmac_rsp_get_link_cfg *rsp_params;
20498 + struct mc_command cmd = { 0 };
20499 + int err = 0;
20500 +
20501 + /* prepare command */
20502 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
20503 + cmd_flags,
20504 + token);
20505 +
20506 + /* send command to mc*/
20507 + err = mc_send_command(mc_io, &cmd);
20508 + if (err)
20509 + return err;
20510 +
20511 + rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
20512 + cfg->options = le64_to_cpu(rsp_params->options);
20513 + cfg->rate = le32_to_cpu(rsp_params->rate);
20514 +
20515 + return 0;
20516 +}
20517 +
20518 +/**
20519 + * dpmac_set_link_state() - Set the Ethernet link status
20520 + * @mc_io: Pointer to opaque I/O object
20521 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20522 + * @token: Token of DPMAC object
20523 + * @link_state: Link state configuration
20524 + *
20525 + * Return: '0' on Success; Error code otherwise.
20526 + */
20527 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
20528 + u32 cmd_flags,
20529 + u16 token,
20530 + struct dpmac_link_state *link_state)
20531 +{
20532 + struct dpmac_cmd_set_link_state *cmd_params;
20533 + struct mc_command cmd = { 0 };
20534 +
20535 + /* prepare command */
20536 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
20537 + cmd_flags,
20538 + token);
20539 + cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
20540 + cmd_params->options = cpu_to_le64(link_state->options);
20541 + cmd_params->rate = cpu_to_le32(link_state->rate);
20542 + cmd_params->up = dpmac_get_field(link_state->up, STATE);
20543 +
20544 + /* send command to mc*/
20545 + return mc_send_command(mc_io, &cmd);
20546 +}
20547 +
20548 +/**
20549 + * dpmac_get_counter() - Read a specific DPMAC counter
20550 + * @mc_io: Pointer to opaque I/O object
20551 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20552 + * @token: Token of DPMAC object
20553 + * @type: The requested counter
20554 + * @counter: Returned counter value
20555 + *
20556 + * Return: '0' on Success; Error code otherwise.
20557 + */
20558 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
20559 + u32 cmd_flags,
20560 + u16 token,
20561 + enum dpmac_counter type,
20562 + u64 *counter)
20563 +{
20564 + struct dpmac_cmd_get_counter *dpmac_cmd;
20565 + struct dpmac_rsp_get_counter *dpmac_rsp;
20566 + struct mc_command cmd = { 0 };
20567 + int err = 0;
20568 +
20569 + /* prepare command */
20570 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
20571 + cmd_flags,
20572 + token);
20573 + dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
20574 + dpmac_cmd->type = type;
20575 +
20576 + /* send command to mc*/
20577 + err = mc_send_command(mc_io, &cmd);
20578 + if (err)
20579 + return err;
20580 +
20581 + dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
20582 + *counter = le64_to_cpu(dpmac_rsp->counter);
20583 +
20584 + return 0;
20585 +}
20586 +
20587 +/* untested */
20588 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
20589 + u32 cmd_flags,
20590 + u16 token,
20591 + const u8 addr[6])
20592 +{
20593 + struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
20594 + struct mc_command cmd = { 0 };
20595 +
20596 + /* prepare command */
20597 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
20598 + cmd_flags,
20599 + token);
20600 + dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params;
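+ /* The address bytes are written in reverse order: addr[5], the last
+  * octet, goes into byte 0 of the command field.
+  */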
20601 + dpmac_cmd->addr[0] = addr[5];
20602 + dpmac_cmd->addr[1] = addr[4];
20603 + dpmac_cmd->addr[2] = addr[3];
20604 + dpmac_cmd->addr[3] = addr[2];
20605 + dpmac_cmd->addr[4] = addr[1];
20606 + dpmac_cmd->addr[5] = addr[0];
20607 +
20608 + /* send command to mc*/
20609 + return mc_send_command(mc_io, &cmd);
20610 +}
20611 +
20612 +/**
20613 + * dpmac_get_api_version() - Get Data Path MAC version
20614 + * @mc_io: Pointer to MC portal's I/O object
20615 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20616 + * @major_ver: Major version of data path mac API
20617 + * @minor_ver: Minor version of data path mac API
20618 + *
20619 + * Return: '0' on Success; Error code otherwise.
20620 + */
20621 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
20622 + u32 cmd_flags,
20623 + u16 *major_ver,
20624 + u16 *minor_ver)
20625 +{
20626 + struct dpmac_rsp_get_api_version *rsp_params;
20627 + struct mc_command cmd = { 0 };
20628 + int err;
20629 +
20630 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
20631 + cmd_flags,
20632 + 0);
20633 +
20634 + err = mc_send_command(mc_io, &cmd);
20635 + if (err)
20636 + return err;
20637 +
20638 + rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
20639 + *major_ver = le16_to_cpu(rsp_params->major);
20640 + *minor_ver = le16_to_cpu(rsp_params->minor);
20641 +
20642 + return 0;
20643 +}
20644 diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.h b/drivers/staging/fsl-dpaa2/mac/dpmac.h
20645 new file mode 100644
20646 index 00000000..32d4ada2
20647 --- /dev/null
20648 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
20649 @@ -0,0 +1,342 @@
20650 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
20651 + *
20652 + * Redistribution and use in source and binary forms, with or without
20653 + * modification, are permitted provided that the following conditions are met:
20654 + * * Redistributions of source code must retain the above copyright
20655 + * notice, this list of conditions and the following disclaimer.
20656 + * * Redistributions in binary form must reproduce the above copyright
20657 + * notice, this list of conditions and the following disclaimer in the
20658 + * documentation and/or other materials provided with the distribution.
20659 + * * Neither the name of the above-listed copyright holders nor the
20660 + * names of any contributors may be used to endorse or promote products
20661 + * derived from this software without specific prior written permission.
20662 + *
20663 + *
20664 + * ALTERNATIVELY, this software may be distributed under the terms of the
20665 + * GNU General Public License ("GPL") as published by the Free Software
20666 + * Foundation, either version 2 of that License or (at your option) any
20667 + * later version.
20668 + *
20669 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20670 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20671 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20672 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
20673 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20674 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20675 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20676 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20677 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20678 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
20679 + * POSSIBILITY OF SUCH DAMAGE.
20680 + */
20681 +#ifndef __FSL_DPMAC_H
20682 +#define __FSL_DPMAC_H
20683 +
20684 +/* Data Path MAC API
20685 + * Contains initialization APIs and runtime control APIs for DPMAC
20686 + */
20687 +
20688 +struct fsl_mc_io;
20689 +
20690 +int dpmac_open(struct fsl_mc_io *mc_io,
20691 + u32 cmd_flags,
20692 + int dpmac_id,
20693 + u16 *token);
20694 +
20695 +int dpmac_close(struct fsl_mc_io *mc_io,
20696 + u32 cmd_flags,
20697 + u16 token);
20698 +
20699 +/**
20700 + * enum dpmac_link_type - DPMAC link type
20701 + * @DPMAC_LINK_TYPE_NONE: No link
20702 + * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
20703 + * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
20704 + * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
20705 + */
20706 +enum dpmac_link_type {
20707 + DPMAC_LINK_TYPE_NONE,
20708 + DPMAC_LINK_TYPE_FIXED,
20709 + DPMAC_LINK_TYPE_PHY,
20710 + DPMAC_LINK_TYPE_BACKPLANE
20711 +};
20712 +
20713 +/**
20714 + * enum dpmac_eth_if - DPMAC Ethernet interface
20715 + * @DPMAC_ETH_IF_MII: MII interface
20716 + * @DPMAC_ETH_IF_RMII: RMII interface
20717 + * @DPMAC_ETH_IF_SMII: SMII interface
20718 + * @DPMAC_ETH_IF_GMII: GMII interface
20719 + * @DPMAC_ETH_IF_RGMII: RGMII interface
20720 + * @DPMAC_ETH_IF_SGMII: SGMII interface
20721 + * @DPMAC_ETH_IF_QSGMII: QSGMII interface
20722 + * @DPMAC_ETH_IF_XAUI: XAUI interface
20723 + * @DPMAC_ETH_IF_XFI: XFI interface
20724 + */
20725 +enum dpmac_eth_if {
20726 + DPMAC_ETH_IF_MII,
20727 + DPMAC_ETH_IF_RMII,
20728 + DPMAC_ETH_IF_SMII,
20729 + DPMAC_ETH_IF_GMII,
20730 + DPMAC_ETH_IF_RGMII,
20731 + DPMAC_ETH_IF_SGMII,
20732 + DPMAC_ETH_IF_QSGMII,
20733 + DPMAC_ETH_IF_XAUI,
20734 + DPMAC_ETH_IF_XFI
20735 +};
20736 +
20737 +/**
20738 + * struct dpmac_cfg - Structure representing DPMAC configuration
20739 + * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOPs,
20740 + * the MAC IDs are contiguous.
20741 + * For example: 2 WRIOPs, 16 MACs in each:
20742 + * MAC IDs for the 1st WRIOP: 1-16,
20743 + * MAC IDs for the 2nd WRIOP: 17-32.
20744 + */
20745 +struct dpmac_cfg {
20746 + u16 mac_id;
20747 +};
20748 +
20749 +int dpmac_create(struct fsl_mc_io *mc_io,
20750 + u16 dprc_token,
20751 + u32 cmd_flags,
20752 + const struct dpmac_cfg *cfg,
20753 + u32 *obj_id);
20754 +
20755 +int dpmac_destroy(struct fsl_mc_io *mc_io,
20756 + u16 dprc_token,
20757 + u32 cmd_flags,
20758 + u32 object_id);
20759 +
20760 +/**
20761 + * DPMAC IRQ Index and Events
20762 + */
20763 +
20764 +/**
20765 + * IRQ index
20766 + */
20767 +#define DPMAC_IRQ_INDEX 0
20768 +/**
20769 + * IRQ event - Indicates that a link configuration change was requested
20770 + */
20771 +#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
20772 +/**
20773 + * IRQ event - Indicates that the link state changed
20774 + */
20775 +#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
20776 +
20777 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
20778 + u32 cmd_flags,
20779 + u16 token,
20780 + u8 irq_index,
20781 + u8 en);
20782 +
20783 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
20784 + u32 cmd_flags,
20785 + u16 token,
20786 + u8 irq_index,
20787 + u8 *en);
20788 +
20789 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
20790 + u32 cmd_flags,
20791 + u16 token,
20792 + u8 irq_index,
20793 + u32 mask);
20794 +
20795 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
20796 + u32 cmd_flags,
20797 + u16 token,
20798 + u8 irq_index,
20799 + u32 *mask);
20800 +
20801 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
20802 + u32 cmd_flags,
20803 + u16 token,
20804 + u8 irq_index,
20805 + u32 *status);
20806 +
20807 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
20808 + u32 cmd_flags,
20809 + u16 token,
20810 + u8 irq_index,
20811 + u32 status);
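+
+/*
+ * Typical IRQ usage sketch (illustrative; it mirrors the sequence used by the
+ * mac driver in this patch): select the causes of interest with
+ * dpmac_set_irq_mask(), enable the interrupt with dpmac_set_irq_enable(),
+ * then read and clear the pending causes in the handler. 'mc_io' and 'token'
+ * are assumed to come from an earlier dpmac_open().
+ *
+ *	err = dpmac_set_irq_mask(mc_io, 0, token, DPMAC_IRQ_INDEX,
+ *				 DPMAC_IRQ_EVENT_LINK_CFG_REQ);
+ *	err = dpmac_set_irq_enable(mc_io, 0, token, DPMAC_IRQ_INDEX, 1);
+ *	...
+ *	err = dpmac_get_irq_status(mc_io, 0, token, DPMAC_IRQ_INDEX, &status);
+ *	err = dpmac_clear_irq_status(mc_io, 0, token, DPMAC_IRQ_INDEX, status);
+ */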
20812 +
20813 +/**
20814 + * struct dpmac_attr - Structure representing DPMAC attributes
20815 + * @id: DPMAC object ID
20816 + * @max_rate: Maximum supported rate - in Mbps
20817 + * @eth_if: Ethernet interface
20818 + * @link_type: link type
20819 + */
20820 +struct dpmac_attr {
20821 + u16 id;
20822 + u32 max_rate;
20823 + enum dpmac_eth_if eth_if;
20824 + enum dpmac_link_type link_type;
20825 +};
20826 +
20827 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
20828 + u32 cmd_flags,
20829 + u16 token,
20830 + struct dpmac_attr *attr);
20831 +
20832 +/**
20833 + * DPMAC link configuration/state options
20834 + */
20835 +
20836 +/**
20837 + * Enable auto-negotiation
20838 + */
20839 +#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
20840 +/**
20841 + * Enable half-duplex mode
20842 + */
20843 +#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
20844 +/**
20845 + * Enable pause frames
20846 + */
20847 +#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
20848 +/**
20849 + * Enable asymmetric pause frames
20850 + */
20851 +#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
20852 +
20853 +/**
20854 + * struct dpmac_link_cfg - Structure representing DPMAC link configuration
20855 + * @rate: Link's rate - in Mbps
20856 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
20857 + */
20858 +struct dpmac_link_cfg {
20859 + u32 rate;
20860 + u64 options;
20861 +};
20862 +
20863 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
20864 + u32 cmd_flags,
20865 + u16 token,
20866 + struct dpmac_link_cfg *cfg);
20867 +
20868 +/**
20869 + * struct dpmac_link_state - DPMAC link configuration request
20870 + * @rate: Rate in Mbps
20871 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
20872 + * @up: Link state
20873 + */
20874 +struct dpmac_link_state {
20875 + u32 rate;
20876 + u64 options;
20877 + int up;
20878 +};
20879 +
20880 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
20881 + u32 cmd_flags,
20882 + u16 token,
20883 + struct dpmac_link_state *link_state);
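+
+/*
+ * Usage sketch (illustrative): report a 1000 Mbps link as up, with
+ * auto-negotiation enabled. 'mc_io' and 'token' are assumed to come from an
+ * earlier dpmac_open().
+ *
+ *	struct dpmac_link_state state = {
+ *		.rate = 1000,
+ *		.options = DPMAC_LINK_OPT_AUTONEG,
+ *		.up = 1,
+ *	};
+ *
+ *	err = dpmac_set_link_state(mc_io, 0, token, &state);
+ */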
20884 +
20885 +/**
20886 + * enum dpmac_counter - DPMAC counter types
20887 + * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad.
20888 + * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad.
20889 + * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad.
20890 + * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad.
20891 + * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad.
20892 + * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad.
20893 + * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-byte frames and larger
20894 + * (up to max frame length specified),
20895 + * good or bad.
20896 + * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes
20897 + * with a wrong CRC
20898 + * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
20899 + * specified, with a bad frame check sequence.
20900 + * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
20901 + * Occurs when a receive FIFO overflows.
20902 + * Also includes frames truncated as a result of
20903 + * the receive FIFO overflow.
20904 + * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
20905 + * (optionally used for a wrong SFD).
20906 + * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than 64
20907 + * bytes long, with a good CRC.
20908 + * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
20909 + * specified, with a good frame check sequence.
20910 + * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
20911 + * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
20912 + * (regular and PFC).
20913 + * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
20914 + * frames and valid pause frames.
20915 + * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
20916 + * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
20917 + * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
20918 + * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
20919 + * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
20920 + * (except for undersized/fragment frame).
20921 + * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
20922 + * frames and valid pause frames transmitted.
20923 + * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
20924 + * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
20925 + * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
20926 + * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
20927 + * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
20928 + * pause frames.
20929 + * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
20930 + * pause frames.
20931 + */
20932 +enum dpmac_counter {
20933 + DPMAC_CNT_ING_FRAME_64,
20934 + DPMAC_CNT_ING_FRAME_127,
20935 + DPMAC_CNT_ING_FRAME_255,
20936 + DPMAC_CNT_ING_FRAME_511,
20937 + DPMAC_CNT_ING_FRAME_1023,
20938 + DPMAC_CNT_ING_FRAME_1518,
20939 + DPMAC_CNT_ING_FRAME_1519_MAX,
20940 + DPMAC_CNT_ING_FRAG,
20941 + DPMAC_CNT_ING_JABBER,
20942 + DPMAC_CNT_ING_FRAME_DISCARD,
20943 + DPMAC_CNT_ING_ALIGN_ERR,
20944 + DPMAC_CNT_EGR_UNDERSIZED,
20945 + DPMAC_CNT_ING_OVERSIZED,
20946 + DPMAC_CNT_ING_VALID_PAUSE_FRAME,
20947 + DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
20948 + DPMAC_CNT_ING_BYTE,
20949 + DPMAC_CNT_ING_MCAST_FRAME,
20950 + DPMAC_CNT_ING_BCAST_FRAME,
20951 + DPMAC_CNT_ING_ALL_FRAME,
20952 + DPMAC_CNT_ING_UCAST_FRAME,
20953 + DPMAC_CNT_ING_ERR_FRAME,
20954 + DPMAC_CNT_EGR_BYTE,
20955 + DPMAC_CNT_EGR_MCAST_FRAME,
20956 + DPMAC_CNT_EGR_BCAST_FRAME,
20957 + DPMAC_CNT_EGR_UCAST_FRAME,
20958 + DPMAC_CNT_EGR_ERR_FRAME,
20959 + DPMAC_CNT_ING_GOOD_FRAME,
20960 + DPMAC_CNT_ENG_GOOD_FRAME
20961 +};
20962 +
20963 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
20964 + u32 cmd_flags,
20965 + u16 token,
20966 + enum dpmac_counter type,
20967 + u64 *counter);
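+
+/*
+ * Usage sketch (illustrative): read the received-bytes counter. 'mc_io' and
+ * 'token' are assumed to come from an earlier dpmac_open().
+ *
+ *	u64 rx_bytes;
+ *
+ *	err = dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_BYTE, &rx_bytes);
+ */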
20968 +
20969 +/**
20970 + * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical
20971 + * port. This address is not used for filtering; the MAC is always in
20972 + * promiscuous mode. It is passed to DPNIs through the DPNI API for
20973 + * application use.
20974 + * @mc_io: Pointer to opaque I/O object
20975 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20976 + * @token: Token of DPMAC object
20977 + * @addr: MAC address to set
20978 + *
20979 + * Return: '0' on Success; Error code otherwise.
20980 + */
20981 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
20982 + u32 cmd_flags,
20983 + u16 token,
20984 + const u8 addr[6]);
20985 +
20986 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
20987 + u32 cmd_flags,
20988 + u16 *major_ver,
20989 + u16 *minor_ver);
20990 +
20991 +#endif /* __FSL_DPMAC_H */
20992 diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c
20993 new file mode 100644
20994 index 00000000..30169639
20995 --- /dev/null
20996 +++ b/drivers/staging/fsl-dpaa2/mac/mac.c
20997 @@ -0,0 +1,666 @@
20998 +/* Copyright 2015 Freescale Semiconductor Inc.
20999 + *
21000 + * Redistribution and use in source and binary forms, with or without
21001 + * modification, are permitted provided that the following conditions are met:
21002 + * * Redistributions of source code must retain the above copyright
21003 + * notice, this list of conditions and the following disclaimer.
21004 + * * Redistributions in binary form must reproduce the above copyright
21005 + * notice, this list of conditions and the following disclaimer in the
21006 + * documentation and/or other materials provided with the distribution.
21007 + * * Neither the name of Freescale Semiconductor nor the
21008 + * names of its contributors may be used to endorse or promote products
21009 + * derived from this software without specific prior written permission.
21010 + *
21011 + *
21012 + * ALTERNATIVELY, this software may be distributed under the terms of the
21013 + * GNU General Public License ("GPL") as published by the Free Software
21014 + * Foundation, either version 2 of that License or (at your option) any
21015 + * later version.
21016 + *
21017 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
21018 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21019 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21020 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
21021 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21022 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21023 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
21024 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21025 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21026 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21027 + */
21028 +
21029 +#include <linux/module.h>
21030 +
21031 +#include <linux/netdevice.h>
21032 +#include <linux/etherdevice.h>
21033 +#include <linux/msi.h>
21034 +#include <linux/rtnetlink.h>
21035 +#include <linux/if_vlan.h>
21036 +
21037 +#include <uapi/linux/if_bridge.h>
21038 +#include <net/netlink.h>
21039 +
21040 +#include <linux/of.h>
21041 +#include <linux/of_mdio.h>
21042 +#include <linux/of_net.h>
21043 +#include <linux/phy.h>
21044 +#include <linux/phy_fixed.h>
21045 +
21046 +#include "../../fsl-mc/include/mc.h"
21047 +#include "../../fsl-mc/include/mc-sys.h"
21048 +
21049 +#include "dpmac.h"
21050 +#include "dpmac-cmd.h"
21051 +
21052 +struct dpaa2_mac_priv {
21053 + struct net_device *netdev;
21054 + struct fsl_mc_device *mc_dev;
21055 + struct dpmac_attr attr;
21056 + struct dpmac_link_state old_state;
21057 +};
21058 +
21059 +/* TODO: fix the 10G modes, mapping can't be right:
21060 + * XGMII is parallel
21061 + * XAUI is serial, using 8b/10b encoding
21062 + * XFI is also serial but using 64b/66b encoding
21063 + * they can't all map to XGMII...
21064 + *
21065 + * This must be kept in sync with enum dpmac_eth_if.
21066 + */
21067 +static phy_interface_t dpaa2_mac_iface_mode[] = {
21068 + PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */
21069 + PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */
21070 + PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */
21071 + PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */
21072 + PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */
21073 + PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */
21074 + PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */
21075 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */
21076 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */
21077 +};
21078 +
21079 +static void dpaa2_mac_link_changed(struct net_device *netdev)
21080 +{
21081 + struct phy_device *phydev;
21082 + struct dpmac_link_state state = { 0 };
21083 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21084 + int err;
21085 +
21086 + /* the PHY just notified us of link state change */
21087 + phydev = netdev->phydev;
21088 +
21089 + state.up = !!phydev->link;
21090 + if (phydev->link) {
21091 + state.rate = phydev->speed;
21092 +
21093 + if (!phydev->duplex)
21094 + state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
21095 + if (phydev->autoneg)
21096 + state.options |= DPMAC_LINK_OPT_AUTONEG;
21097 +
21098 + netif_carrier_on(netdev);
21099 + } else {
21100 + netif_carrier_off(netdev);
21101 + }
21102 +
21103 + if (priv->old_state.up != state.up ||
21104 + priv->old_state.rate != state.rate ||
21105 + priv->old_state.options != state.options) {
21106 + priv->old_state = state;
21107 + phy_print_status(phydev);
21108 + }
21109 +
21110 + /* We must interrogate MC at all times, because we don't know
21111 + * when and whether a potential DPNI may have read the link state.
21112 + */
21113 + err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
21114 + priv->mc_dev->mc_handle, &state);
21115 + if (unlikely(err))
21116 + dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
21117 +}
21118 +
21119 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21120 +static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
21121 + struct net_device *dev)
21122 +{
21123 + /* we don't support I/O for now, drop the frame */
21124 + dev_kfree_skb_any(skb);
21125 + return NETDEV_TX_OK;
21126 +}
21127 +
21128 +static int dpaa2_mac_open(struct net_device *netdev)
21129 +{
21130 + /* start PHY state machine */
21131 + phy_start(netdev->phydev);
21132 +
21133 + return 0;
21134 +}
21135 +
21136 +static int dpaa2_mac_stop(struct net_device *netdev)
21137 +{
21138 + if (!netdev->phydev)
21139 + goto done;
21140 +
21141 + /* stop PHY state machine */
21142 + phy_stop(netdev->phydev);
21143 +
21144 + /* signal link down to firmware */
21145 + netdev->phydev->link = 0;
21146 + dpaa2_mac_link_changed(netdev);
21147 +
21148 +done:
21149 + return 0;
21150 +}
21151 +
21152 +static int dpaa2_mac_get_settings(struct net_device *netdev,
21153 + struct ethtool_cmd *cmd)
21154 +{
21155 + return phy_ethtool_gset(netdev->phydev, cmd);
21156 +}
21157 +
21158 +static int dpaa2_mac_set_settings(struct net_device *netdev,
21159 + struct ethtool_cmd *cmd)
21160 +{
21161 + return phy_ethtool_sset(netdev->phydev, cmd);
21162 +}
21163 +
21164 +static void dpaa2_mac_get_stats(struct net_device *netdev,
21165 + struct rtnl_link_stats64 *storage)
21166 +{
21167 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21168 + u64 tmp;
21169 + int err;
21170 +
21171 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21172 + DPMAC_CNT_EGR_MCAST_FRAME,
21173 + &storage->tx_packets);
21174 + if (err)
21175 + goto error;
21176 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21177 + DPMAC_CNT_EGR_BCAST_FRAME, &tmp);
21178 + if (err)
21179 + goto error;
21180 + storage->tx_packets += tmp;
21181 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21182 + DPMAC_CNT_EGR_UCAST_FRAME, &tmp);
21183 + if (err)
21184 + goto error;
21185 + storage->tx_packets += tmp;
21186 +
21187 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21188 + DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped);
21189 + if (err)
21190 + goto error;
21191 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21192 + DPMAC_CNT_EGR_BYTE, &storage->tx_bytes);
21193 + if (err)
21194 + goto error;
21195 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21196 + DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors);
21197 + if (err)
21198 + goto error;
21199 +
21200 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21201 + DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets);
21202 + if (err)
21203 + goto error;
21204 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21205 + DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast);
21206 + if (err)
21207 + goto error;
21208 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21209 + DPMAC_CNT_ING_FRAME_DISCARD,
21210 + &storage->rx_dropped);
21211 + if (err)
21212 + goto error;
21213 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21214 + DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors);
21215 + if (err)
21216 + goto error;
21217 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21218 + DPMAC_CNT_ING_OVERSIZED, &tmp);
21219 + if (err)
21220 + goto error;
21221 + storage->rx_errors += tmp;
21222 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21223 + DPMAC_CNT_ING_BYTE, &storage->rx_bytes);
21224 + if (err)
21225 + goto error;
21226 +
21227 + return;
21228 +error:
21229 + netdev_err(netdev, "dpmac_get_counter err %d\n", err);
21230 +}
21231 +
21232 +static struct {
21233 + enum dpmac_counter id;
21234 + char name[ETH_GSTRING_LEN];
21235 +} dpaa2_mac_counters[] = {
21236 + {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"},
21237 + {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"},
21238 + {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"},
21239 + {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"},
21240 + {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"},
21241 + {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"},
21242 + {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"},
21243 + {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"},
21244 + {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"},
21245 + {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"},
21246 + {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"},
21247 + {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"},
21248 + {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"},
21249 + {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"},
21250 + {DPMAC_CNT_ING_FRAG, "rx frags"},
21251 + {DPMAC_CNT_ING_JABBER, "rx jabber"},
21252 + {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"},
21253 + {DPMAC_CNT_ING_OVERSIZED, "rx oversized"},
21254 + {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"},
21255 + {DPMAC_CNT_ING_BYTE, "rx bytes"},
21256 + {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"},
21257 + {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"},
21258 + {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"},
21259 + {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"},
21260 + {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"},
21261 + {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"},
21262 + {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"},
21263 + {DPMAC_CNT_EGR_BYTE, "tx bytes"},
21264 +
21265 +};
21266 +
21267 +static void dpaa2_mac_get_strings(struct net_device *netdev,
21268 + u32 stringset, u8 *data)
21269 +{
21270 + int i;
21271 +
21272 + switch (stringset) {
21273 + case ETH_SS_STATS:
21274 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++)
21275 + memcpy(data + i * ETH_GSTRING_LEN,
21276 + dpaa2_mac_counters[i].name,
21277 + ETH_GSTRING_LEN);
21278 + break;
21279 + }
21280 +}
21281 +
21282 +static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev,
21283 + struct ethtool_stats *stats,
21284 + u64 *data)
21285 +{
21286 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21287 + int i;
21288 + int err;
21289 +
21290 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) {
21291 + err = dpmac_get_counter(priv->mc_dev->mc_io,
21292 + 0,
21293 + priv->mc_dev->mc_handle,
21294 + dpaa2_mac_counters[i].id, &data[i]);
21295 + if (err)
21296 + netdev_err(netdev, "dpmac_get_counter[%s] err %d\n",
21297 + dpaa2_mac_counters[i].name, err);
21298 + }
21299 +}
21300 +
21301 +static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset)
21302 +{
21303 + switch (sset) {
21304 + case ETH_SS_STATS:
21305 + return ARRAY_SIZE(dpaa2_mac_counters);
21306 + default:
21307 + return -EOPNOTSUPP;
21308 + }
21309 +}
21310 +
21311 +static const struct net_device_ops dpaa2_mac_ndo_ops = {
21312 + .ndo_start_xmit = &dpaa2_mac_drop_frame,
21313 + .ndo_open = &dpaa2_mac_open,
21314 + .ndo_stop = &dpaa2_mac_stop,
21315 + .ndo_get_stats64 = &dpaa2_mac_get_stats,
21316 +};
21317 +
21318 +static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
21319 + .get_settings = &dpaa2_mac_get_settings,
21320 + .set_settings = &dpaa2_mac_set_settings,
21321 + .get_strings = &dpaa2_mac_get_strings,
21322 + .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
21323 + .get_sset_count = &dpaa2_mac_get_sset_count,
21324 +};
21325 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21326 +
21327 +static void configure_link(struct dpaa2_mac_priv *priv,
21328 + struct dpmac_link_cfg *cfg)
21329 +{
21330 + struct phy_device *phydev = priv->netdev->phydev;
21331 +
21332 + if (unlikely(!phydev))
21333 + return;
21334 +
21335 + phydev->speed = cfg->rate;
21336 + phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
21337 +
21338 + if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
21339 + phydev->autoneg = 1;
21340 + phydev->advertising |= ADVERTISED_Autoneg;
21341 + } else {
21342 + phydev->autoneg = 0;
21343 + phydev->advertising &= ~ADVERTISED_Autoneg;
21344 + }
21345 +
21346 + phy_start_aneg(phydev);
21347 +}
21348 +
21349 +static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
21350 +{
21351 + struct device *dev = (struct device *)arg;
21352 + struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
21353 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
21354 + struct dpmac_link_cfg link_cfg;
21355 + u32 status;
21356 + int err;
21357 +
21358 + err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21359 + DPMAC_IRQ_INDEX, &status);
21360 + if (unlikely(err || !status))
21361 + return IRQ_NONE;
21362 +
21363 + /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
21364 + if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
21365 + err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
21366 + &link_cfg);
21367 + if (unlikely(err))
21368 + goto out;
21369 +
21370 + configure_link(priv, &link_cfg);
21371 + }
21372 +
21373 +out:
21374 + dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21375 + DPMAC_IRQ_INDEX, status);
21376 +
21377 + return IRQ_HANDLED;
21378 +}
21379 +
21380 +static int setup_irqs(struct fsl_mc_device *mc_dev)
21381 +{
21382 + int err = 0;
21383 + struct fsl_mc_device_irq *irq;
21384 +
21385 + err = fsl_mc_allocate_irqs(mc_dev);
21386 + if (err) {
21387 + dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
21388 + return err;
21389 + }
21390 +
21391 + irq = mc_dev->irqs[0];
21392 + err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq,
21393 + NULL, &dpaa2_mac_irq_handler,
21394 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
21395 + dev_name(&mc_dev->dev), &mc_dev->dev);
21396 + if (err) {
21397 + dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
21398 + err);
21399 + goto free_irq;
21400 + }
21401 +
21402 + err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
21403 + DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ);
21404 + if (err) {
21405 + dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
21406 + goto free_irq;
21407 + }
21408 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21409 + DPMAC_IRQ_INDEX, 1);
21410 + if (err) {
21411 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21412 + goto free_irq;
21413 + }
21414 +
21415 + return 0;
21416 +
21417 +free_irq:
21418 + fsl_mc_free_irqs(mc_dev);
21419 +
21420 + return err;
21421 +}
21422 +
21423 +static void teardown_irqs(struct fsl_mc_device *mc_dev)
21424 +{
21425 + int err;
21426 +
21427 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21428 + DPMAC_IRQ_INDEX, 0);
21429 + if (err)
21430 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21431 +
21432 + fsl_mc_free_irqs(mc_dev);
21433 +}
21434 +
21435 +static struct device_node *find_dpmac_node(struct device *dev, u16 dpmac_id)
21436 +{
21437 + struct device_node *dpmacs, *dpmac = NULL;
21438 + struct device_node *mc_node = dev->of_node;
21439 + u32 id;
21440 + int err;
21441 +
21442 + dpmacs = of_find_node_by_name(mc_node, "dpmacs");
21443 + if (!dpmacs) {
21444 + dev_err(dev, "No dpmacs subnode in device-tree\n");
21445 + return NULL;
21446 + }
21447 +
21448 + while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
21449 + err = of_property_read_u32(dpmac, "reg", &id);
21450 + if (err)
21451 + continue;
21452 + if (id == dpmac_id)
21453 + return dpmac;
21454 + }
21455 +
21456 + return NULL;
21457 +}
21458 +
21459 +static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
21460 +{
21461 + struct device *dev;
21462 + struct dpaa2_mac_priv *priv = NULL;
21463 + struct device_node *phy_node, *dpmac_node;
21464 + struct net_device *netdev;
21465 + phy_interface_t if_mode;
21466 + int err = 0;
21467 +
21468 + dev = &mc_dev->dev;
21469 +
21470 + /* prepare a net_dev structure to make the phy lib API happy */
21471 + netdev = alloc_etherdev(sizeof(*priv));
21472 + if (!netdev) {
21473 + dev_err(dev, "alloc_etherdev error\n");
21474 + err = -ENOMEM;
21475 + goto err_exit;
21476 + }
21477 + priv = netdev_priv(netdev);
21478 + priv->mc_dev = mc_dev;
21479 + priv->netdev = netdev;
21480 +
21481 + SET_NETDEV_DEV(netdev, dev);
21482 +
21483 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21484 + snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
21485 +#endif
21486 +
21487 + dev_set_drvdata(dev, priv);
21488 +
21489 + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
21490 + if (err || !mc_dev->mc_io) {
21491 + dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
21492 + err = -ENODEV;
21493 + goto err_free_netdev;
21494 + }
21495 +
21496 + err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
21497 + &mc_dev->mc_handle);
21498 + if (err || !mc_dev->mc_handle) {
21499 + dev_err(dev, "dpmac_open error: %d\n", err);
21500 + err = -ENODEV;
21501 + goto err_free_mcp;
21502 + }
21503 +
21504 + err = dpmac_get_attributes(mc_dev->mc_io, 0,
21505 + mc_dev->mc_handle, &priv->attr);
21506 + if (err) {
21507 + dev_err(dev, "dpmac_get_attributes err %d\n", err);
21508 + err = -EINVAL;
21509 + goto err_close;
21510 + }
21511 +
21512 + /* Look up the DPMAC node in the device-tree. */
21513 + dpmac_node = find_dpmac_node(dev, priv->attr.id);
21514 + if (!dpmac_node) {
21515 + dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
21516 + err = -ENODEV;
21517 + goto err_close;
21518 + }
21519 +
21520 + err = setup_irqs(mc_dev);
21521 + if (err) {
21522 + err = -EFAULT;
21523 + goto err_close;
21524 + }
21525 +
21526 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21527 + /* OPTIONAL, register netdev just to make it visible to the user */
21528 + netdev->netdev_ops = &dpaa2_mac_ndo_ops;
21529 + netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
21530 +
21531 + /* phy starts up enabled so netdev should be up too */
21532 + netdev->flags |= IFF_UP;
21533 +
21534 + err = register_netdev(priv->netdev);
21535 + if (err < 0) {
21536 + dev_err(dev, "register_netdev error %d\n", err);
21537 + err = -ENODEV;
21538 + goto err_free_irq;
21539 + }
21540 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21541 +
21542 + /* probe the PHY as a fixed-link if the link type declared in DPC
21543 + * explicitly mandates this
21544 + */
21545 +
21546 + phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
21547 + if (!phy_node) {
21548 + goto probe_fixed_link;
21549 + }
21550 +
21551 + if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
21552 + if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
21553 + dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
21554 + phy_modes(if_mode), priv->attr.eth_if);
21555 + } else {
21556 + dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n",
21557 + priv->attr.eth_if);
21558 + goto probe_fixed_link;
21559 + }
21560 +
21561 + /* try to connect to the PHY */
21562 + netdev->phydev = of_phy_connect(netdev, phy_node,
21563 + &dpaa2_mac_link_changed, 0, if_mode);
21564 + if (!netdev->phydev) {
21565 + /* No need for dev_err(); the kernel's loud enough as it is. */
21566 + dev_dbg(dev, "Can't of_phy_connect() now.\n");
21567 + /* We might be waiting for the MDIO MUX to probe, so defer
21568 + * our own probing.
21569 + */
21570 + err = -EPROBE_DEFER;
21571 + goto err_defer;
21572 + }
21573 + dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode));
21574 +
21575 +probe_fixed_link:
21576 + if (!netdev->phydev) {
21577 + struct fixed_phy_status status = {
21578 + .link = 1,
21579 + /* fixed-phys don't support 10Gbps speed for now */
21580 + .speed = 1000,
21581 + .duplex = 1,
21582 + };
21583 +
21584 + /* try to register a fixed link phy */
21585 + netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1,
21586 + NULL);
21587 + if (!netdev->phydev || IS_ERR(netdev->phydev)) {
21588 + dev_err(dev, "error trying to register fixed PHY\n");
21589 + /* So we don't crash unregister_netdev() later on */
21590 + netdev->phydev = NULL;
21591 + err = -EFAULT;
21592 + goto err_no_phy;
21593 + }
21594 + dev_info(dev, "Registered fixed PHY.\n");
21595 + }
21596 +
21597 + /* start PHY state machine */
21598 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21599 + dpaa2_mac_open(netdev);
21600 +#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21601 + phy_start(netdev->phydev);
21602 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21603 + return 0;
21604 +
21605 +err_defer:
21606 +err_no_phy:
21607 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21608 + unregister_netdev(netdev);
21609 +err_free_irq:
21610 +#endif
21611 + teardown_irqs(mc_dev);
21612 +err_close:
21613 + dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
21614 +err_free_mcp:
21615 + fsl_mc_portal_free(mc_dev->mc_io);
21616 +err_free_netdev:
21617 + free_netdev(netdev);
21618 +err_exit:
21619 + return err;
21620 +}
21621 +
21622 +static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev)
21623 +{
21624 + struct device *dev = &mc_dev->dev;
21625 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
21626 +
21627 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21628 + unregister_netdev(priv->netdev);
21629 +#endif
21630 + teardown_irqs(priv->mc_dev);
21631 + dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
21632 + fsl_mc_portal_free(priv->mc_dev->mc_io);
21633 + free_netdev(priv->netdev);
21634 +
21635 + dev_set_drvdata(dev, NULL);
21637 +
21638 + return 0;
21639 +}
21640 +
21641 +static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = {
21642 + {
21643 + .vendor = FSL_MC_VENDOR_FREESCALE,
21644 + .obj_type = "dpmac",
21645 + },
21646 + { .vendor = 0x0 }
21647 +};
21648 +MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table);
21649 +
21650 +static struct fsl_mc_driver dpaa2_mac_drv = {
21651 + .driver = {
21652 + .name = KBUILD_MODNAME,
21653 + .owner = THIS_MODULE,
21654 + },
21655 + .probe = dpaa2_mac_probe,
21656 + .remove = dpaa2_mac_remove,
21657 + .match_id_table = dpaa2_mac_match_id_table,
21658 +};
21659 +
21660 +module_fsl_mc_driver(dpaa2_mac_drv);
21661 +
21662 +MODULE_LICENSE("GPL");
21663 +MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver");
21664 diff --git a/drivers/staging/fsl-dpaa2/rtc/Makefile b/drivers/staging/fsl-dpaa2/rtc/Makefile
21665 new file mode 100644
21666 index 00000000..541a7acd
21667 --- /dev/null
21668 +++ b/drivers/staging/fsl-dpaa2/rtc/Makefile
21669 @@ -0,0 +1,10 @@
21670 +
21671 +obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += dpaa2-rtc.o
21672 +
21673 +dpaa2-rtc-objs := rtc.o dprtc.o
21674 +
21675 +all:
21676 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
21677 +
21678 +clean:
21679 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
21680 diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
21681 new file mode 100644
21682 index 00000000..618c7e54
21683 --- /dev/null
21684 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
21685 @@ -0,0 +1,160 @@
21686 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
21687 + *
21688 + * Redistribution and use in source and binary forms, with or without
21689 + * modification, are permitted provided that the following conditions are met:
21690 + * * Redistributions of source code must retain the above copyright
21691 + * notice, this list of conditions and the following disclaimer.
21692 + * * Redistributions in binary form must reproduce the above copyright
21693 + * notice, this list of conditions and the following disclaimer in the
21694 + * documentation and/or other materials provided with the distribution.
21695 + * * Neither the name of the above-listed copyright holders nor the
21696 + * names of any contributors may be used to endorse or promote products
21697 + * derived from this software without specific prior written permission.
21698 + *
21699 + *
21700 + * ALTERNATIVELY, this software may be distributed under the terms of the
21701 + * GNU General Public License ("GPL") as published by the Free Software
21702 + * Foundation, either version 2 of that License or (at your option) any
21703 + * later version.
21704 + *
21705 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21706 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21707 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21708 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21709 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21710 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21711 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21712 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21713 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21714 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21715 + * POSSIBILITY OF SUCH DAMAGE.
21716 + */
21717 +#ifndef _FSL_DPRTC_CMD_H
21718 +#define _FSL_DPRTC_CMD_H
21719 +
21720 +/* DPRTC Version */
21721 +#define DPRTC_VER_MAJOR 2
21722 +#define DPRTC_VER_MINOR 0
21723 +
21724 +/* Command versioning */
21725 +#define DPRTC_CMD_BASE_VERSION 1
21726 +#define DPRTC_CMD_ID_OFFSET 4
21727 +
21728 +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
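+
+/*
+ * For example, DPRTC_CMD(0x800) expands to (0x800 << 4) | 1 = 0x8001, i.e.
+ * command ID 0x800 encoded together with command base version 1.
+ */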
21729 +
21730 +/* Command IDs */
21731 +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
21732 +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
21733 +#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910)
21734 +#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990)
21735 +#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10)
21736 +
21737 +#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002)
21738 +#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003)
21739 +#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004)
21740 +#define DPRTC_CMDID_RESET DPRTC_CMD(0x005)
21741 +#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006)
21742 +
21743 +#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
21744 +#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
21745 +#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
21746 +#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
21747 +#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
21748 +#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
21749 +
21750 +#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0)
21751 +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
21752 +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
21753 +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
21754 +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
21755 +#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5)
21756 +#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6)
21757 +#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7)
21758 +#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8)
21759 +#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9)
21760 +#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA)
21761 +
21762 +/* Macros for accessing command fields smaller than 1 byte */
21763 +#define DPRTC_MASK(field) \
21764 + GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
21765 + DPRTC_##field##_SHIFT)
21766 +#define dprtc_get_field(var, field) \
21767 + (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
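+
+/*
+ * For example, with DPRTC_ENABLE_SHIFT 0 and DPRTC_ENABLE_SIZE 1 (defined
+ * below), dprtc_get_field(x, ENABLE) masks out bit 0 of 'x', yielding 0 or 1.
+ */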
21768 +
21769 +#pragma pack(push, 1)
21770 +struct dprtc_cmd_open {
21771 + uint32_t dprtc_id;
21772 +};
21773 +
21774 +struct dprtc_cmd_destroy {
21775 + uint32_t object_id;
21776 +};
21777 +
21778 +#define DPRTC_ENABLE_SHIFT 0
21779 +#define DPRTC_ENABLE_SIZE 1
21780 +
21781 +struct dprtc_rsp_is_enabled {
21782 + uint8_t en;
21783 +};
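As a minimal illustration of the field macros above, DPRTC_MASK(ENABLE) expands to GENMASK(0, 0) == 0x1 given the shift/size definitions just above, so extracting the enable bit from a response (here through a hypothetical pointer 'rsp' to struct dprtc_rsp_is_enabled) is simply:

	int enabled = dprtc_get_field(rsp->en, ENABLE); /* == (rsp->en & 0x1) >> 0 */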
21784 +
21785 +struct dprtc_cmd_get_irq {
21786 + uint32_t pad;
21787 + uint8_t irq_index;
21788 +};
21789 +
21790 +struct dprtc_cmd_set_irq_enable {
21791 + uint8_t en;
21792 + uint8_t pad[3];
21793 + uint8_t irq_index;
21794 +};
21795 +
21796 +struct dprtc_rsp_get_irq_enable {
21797 + uint8_t en;
21798 +};
21799 +
21800 +struct dprtc_cmd_set_irq_mask {
21801 + uint32_t mask;
21802 + uint8_t irq_index;
21803 +};
21804 +
21805 +struct dprtc_rsp_get_irq_mask {
21806 + uint32_t mask;
21807 +};
21808 +
21809 +struct dprtc_cmd_get_irq_status {
21810 + uint32_t status;
21811 + uint8_t irq_index;
21812 +};
21813 +
21814 +struct dprtc_rsp_get_irq_status {
21815 + uint32_t status;
21816 +};
21817 +
21818 +struct dprtc_cmd_clear_irq_status {
21819 + uint32_t status;
21820 + uint8_t irq_index;
21821 +};
21822 +
21823 +struct dprtc_rsp_get_attributes {
21824 + uint32_t pad;
21825 + uint32_t id;
21826 +};
21827 +
21828 +struct dprtc_cmd_set_clock_offset {
21829 + uint64_t offset;
21830 +};
21831 +
21832 +struct dprtc_get_freq_compensation {
21833 + uint32_t freq_compensation;
21834 +};
21835 +
21836 +struct dprtc_time {
21837 + uint64_t time;
21838 +};
21839 +
21840 +struct dprtc_rsp_get_api_version {
21841 + uint16_t major;
21842 + uint16_t minor;
21843 +};
21844 +#pragma pack(pop)
21845 +#endif /* _FSL_DPRTC_CMD_H */
21846 diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.c b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
21847 new file mode 100644
21848 index 00000000..399177e4
21849 --- /dev/null
21850 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
21851 @@ -0,0 +1,746 @@
21852 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
21853 + *
21854 + * Redistribution and use in source and binary forms, with or without
21855 + * modification, are permitted provided that the following conditions are met:
21856 + * * Redistributions of source code must retain the above copyright
21857 + * notice, this list of conditions and the following disclaimer.
21858 + * * Redistributions in binary form must reproduce the above copyright
21859 + * notice, this list of conditions and the following disclaimer in the
21860 + * documentation and/or other materials provided with the distribution.
21861 + * * Neither the name of the above-listed copyright holders nor the
21862 + * names of any contributors may be used to endorse or promote products
21863 + * derived from this software without specific prior written permission.
21864 + *
21865 + *
21866 + * ALTERNATIVELY, this software may be distributed under the terms of the
21867 + * GNU General Public License ("GPL") as published by the Free Software
21868 + * Foundation, either version 2 of that License or (at your option) any
21869 + * later version.
21870 + *
21871 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21872 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21873 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21874 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21875 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21876 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21877 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21878 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21879 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21880 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21881 + * POSSIBILITY OF SUCH DAMAGE.
21882 + */
21883 +#include "../../fsl-mc/include/mc-sys.h"
21884 +#include "../../fsl-mc/include/mc-cmd.h"
21885 +#include "dprtc.h"
21886 +#include "dprtc-cmd.h"
21887 +
21888 +/**
21889 + * dprtc_open() - Open a control session for the specified object.
21890 + * @mc_io: Pointer to MC portal's I/O object
21891 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21892 + * @dprtc_id: DPRTC unique ID
21893 + * @token: Returned token; use in subsequent API calls
21894 + *
21895 + * This function can be used to open a control session for an
21896 + * already created object; an object may have been declared in
21897 + * the DPL or by calling the dprtc_create function.
21898 + * This function returns a unique authentication token,
21899 + * associated with the specific object ID and the specific MC
21900 + * portal; this token must be used in all subsequent commands for
21901 + * this specific object.
21902 + *
21903 + * Return: '0' on Success; Error code otherwise.
21904 + */
21905 +int dprtc_open(struct fsl_mc_io *mc_io,
21906 + uint32_t cmd_flags,
21907 + int dprtc_id,
21908 + uint16_t *token)
21909 +{
21910 + struct dprtc_cmd_open *cmd_params;
21911 + struct mc_command cmd = { 0 };
21912 + int err;
21913 +
21914 + /* prepare command */
21915 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
21916 + cmd_flags,
21917 + 0);
21918 + cmd_params = (struct dprtc_cmd_open *)cmd.params;
21919 + cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
21920 +
21921 + /* send command to mc*/
21922 + err = mc_send_command(mc_io, &cmd);
21923 + if (err)
21924 + return err;
21925 +
21926 + /* retrieve response parameters */
21927 + *token = mc_cmd_hdr_read_token(&cmd);
21928 +
21929 + return err;
21930 +}
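A minimal caller-side sketch of the token flow described above, assuming an MC portal 'mc_io' already obtained through fsl_mc_portal_allocate() and a 'dprtc_id' taken from the DPL or object description (as rtc.c further below does); the helper name is illustrative only:

static int dprtc_example_session(struct fsl_mc_io *mc_io, int dprtc_id)
{
	struct dprtc_attr attr;
	uint16_t token;
	int err;

	err = dprtc_open(mc_io, 0, dprtc_id, &token);
	if (err)
		return err;

	/* all further commands for this object are authenticated by 'token' */
	err = dprtc_get_attributes(mc_io, 0, token, &attr);

	dprtc_close(mc_io, 0, token);
	return err;
}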
21931 +
21932 +/**
21933 + * dprtc_close() - Close the control session of the object
21934 + * @mc_io: Pointer to MC portal's I/O object
21935 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21936 + * @token: Token of DPRTC object
21937 + *
21938 + * After this function is called, no further operations are
21939 + * allowed on the object without opening a new control session.
21940 + *
21941 + * Return: '0' on Success; Error code otherwise.
21942 + */
21943 +int dprtc_close(struct fsl_mc_io *mc_io,
21944 + uint32_t cmd_flags,
21945 + uint16_t token)
21946 +{
21947 + struct mc_command cmd = { 0 };
21948 +
21949 + /* prepare command */
21950 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
21951 + token);
21952 +
21953 + /* send command to mc*/
21954 + return mc_send_command(mc_io, &cmd);
21955 +}
21956 +
21957 +/**
21958 + * dprtc_create() - Create the DPRTC object.
21959 + * @mc_io: Pointer to MC portal's I/O object
21960 + * @dprc_token: Parent container token; '0' for default container
21961 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21962 + * @cfg: Configuration structure
21963 + * @obj_id: Returned object id
21964 + *
21965 + * Create the DPRTC object, allocate required resources and
21966 + * perform required initialization.
21967 + *
21968 + * The function accepts an authentication token of a parent
21969 + * container that this object should be assigned to. The token
21970 + * can be '0' so the object will be assigned to the default container.
21971 + * The newly created object can be opened with the returned
21972 + * object id and using the container's associated tokens and MC portals.
21973 + *
21974 + * Return: '0' on Success; Error code otherwise.
21975 + */
21976 +int dprtc_create(struct fsl_mc_io *mc_io,
21977 + uint16_t dprc_token,
21978 + uint32_t cmd_flags,
21979 + const struct dprtc_cfg *cfg,
21980 + uint32_t *obj_id)
21981 +{
21982 + struct mc_command cmd = { 0 };
21983 + int err;
21984 +
21985 + (void)(cfg); /* unused */
21986 +
21987 + /* prepare command */
21988 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
21989 + cmd_flags,
21990 + dprc_token);
21991 +
21992 + /* send command to mc*/
21993 + err = mc_send_command(mc_io, &cmd);
21994 + if (err)
21995 + return err;
21996 +
21997 + /* retrieve response parameters */
21998 + *obj_id = mc_cmd_read_object_id(&cmd);
21999 +
22000 + return 0;
22001 +}
22002 +
22003 +/**
22004 + * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
22005 + * @mc_io: Pointer to MC portal's I/O object
22006 + * @dprc_token: Parent container token; '0' for default container
22007 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22008 + * @object_id: The object id; it must be a valid id within the container that
22009 + * created this object.
22010 + *
22011 + * The function accepts the authentication token of the parent container that
22012 + * created the object (not the one that currently owns the object). The object
22013 + * is searched within the parent container using the provided 'object_id'.
22014 + * All tokens to the object must be closed before calling destroy.
22015 + *
22016 + * Return: '0' on Success; error code otherwise.
22017 + */
22018 +int dprtc_destroy(struct fsl_mc_io *mc_io,
22019 + uint16_t dprc_token,
22020 + uint32_t cmd_flags,
22021 + uint32_t object_id)
22022 +{
22023 + struct dprtc_cmd_destroy *cmd_params;
22024 + struct mc_command cmd = { 0 };
22025 +
22026 + /* prepare command */
22027 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
22028 + cmd_flags,
22029 + dprc_token);
22030 + cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
22031 + cmd_params->object_id = cpu_to_le32(object_id);
22032 +
22033 + /* send command to mc*/
22034 + return mc_send_command(mc_io, &cmd);
22035 +}
22036 +
22037 +int dprtc_enable(struct fsl_mc_io *mc_io,
22038 + uint32_t cmd_flags,
22039 + uint16_t token)
22040 +{
22041 + struct mc_command cmd = { 0 };
22042 +
22043 + /* prepare command */
22044 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
22045 + token);
22046 +
22047 + /* send command to mc*/
22048 + return mc_send_command(mc_io, &cmd);
22049 +}
22050 +
22051 +int dprtc_disable(struct fsl_mc_io *mc_io,
22052 + uint32_t cmd_flags,
22053 + uint16_t token)
22054 +{
22055 + struct mc_command cmd = { 0 };
22056 +
22057 + /* prepare command */
22058 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
22059 + cmd_flags,
22060 + token);
22061 +
22062 + /* send command to mc*/
22063 + return mc_send_command(mc_io, &cmd);
22064 +}
22065 +
22066 +int dprtc_is_enabled(struct fsl_mc_io *mc_io,
22067 + uint32_t cmd_flags,
22068 + uint16_t token,
22069 + int *en)
22070 +{
22071 + struct dprtc_rsp_is_enabled *rsp_params;
22072 + struct mc_command cmd = { 0 };
22073 + int err;
22074 +
22075 + /* prepare command */
22076 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
22077 + token);
22078 +
22079 + /* send command to mc*/
22080 + err = mc_send_command(mc_io, &cmd);
22081 + if (err)
22082 + return err;
22083 +
22084 + /* retrieve response parameters */
22085 + rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
22086 + *en = dprtc_get_field(rsp_params->en, ENABLE);
22087 +
22088 + return 0;
22089 +}
22090 +
22091 +int dprtc_reset(struct fsl_mc_io *mc_io,
22092 + uint32_t cmd_flags,
22093 + uint16_t token)
22094 +{
22095 + struct mc_command cmd = { 0 };
22096 +
22097 + /* prepare command */
22098 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
22099 + cmd_flags,
22100 + token);
22101 +
22102 + /* send command to mc*/
22103 + return mc_send_command(mc_io, &cmd);
22104 +}
22105 +
22106 +/**
22107 + * dprtc_set_irq_enable() - Set overall interrupt state.
22108 + * @mc_io: Pointer to MC portal's I/O object
22109 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22110 + * @token: Token of DPRTC object
22111 + * @irq_index: The interrupt index to configure
22112 + * @en: Interrupt state - enable = 1, disable = 0
22113 + *
22114 + * Allows GPP software to control when interrupts are generated.
22115 + * Each interrupt can have up to 32 causes. The enable/disable setting
22116 + * controls the overall interrupt state. If the interrupt is disabled, none of
22117 + * the causes can assert the interrupt.
22118 + *
22119 + * Return: '0' on Success; Error code otherwise.
22120 + */
22121 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
22122 + uint32_t cmd_flags,
22123 + uint16_t token,
22124 + uint8_t irq_index,
22125 + uint8_t en)
22126 +{
22127 + struct dprtc_cmd_set_irq_enable *cmd_params;
22128 + struct mc_command cmd = { 0 };
22129 +
22130 + /* prepare command */
22131 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
22132 + cmd_flags,
22133 + token);
22134 + cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
22135 + cmd_params->irq_index = irq_index;
22136 + cmd_params->en = en;
22137 +
22138 + /* send command to mc*/
22139 + return mc_send_command(mc_io, &cmd);
22140 +}
22141 +
22142 +/**
22143 + * dprtc_get_irq_enable() - Get overall interrupt state
22144 + * @mc_io: Pointer to MC portal's I/O object
22145 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22146 + * @token: Token of DPRTC object
22147 + * @irq_index: The interrupt index to configure
22148 + * @en: Returned interrupt state - enable = 1, disable = 0
22149 + *
22150 + * Return: '0' on Success; Error code otherwise.
22151 + */
22152 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
22153 + uint32_t cmd_flags,
22154 + uint16_t token,
22155 + uint8_t irq_index,
22156 + uint8_t *en)
22157 +{
22158 + struct dprtc_rsp_get_irq_enable *rsp_params;
22159 + struct dprtc_cmd_get_irq *cmd_params;
22160 + struct mc_command cmd = { 0 };
22161 + int err;
22162 +
22163 + /* prepare command */
22164 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
22165 + cmd_flags,
22166 + token);
22167 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22168 + cmd_params->irq_index = irq_index;
22169 +
22170 + /* send command to mc*/
22171 + err = mc_send_command(mc_io, &cmd);
22172 + if (err)
22173 + return err;
22174 +
22175 + /* retrieve response parameters */
22176 + rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
22177 + *en = rsp_params->en;
22178 +
22179 + return 0;
22180 +}
22181 +
22182 +/**
22183 + * dprtc_set_irq_mask() - Set interrupt mask.
22184 + * @mc_io: Pointer to MC portal's I/O object
22185 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22186 + * @token: Token of DPRTC object
22187 + * @irq_index: The interrupt index to configure
22188 + * @mask: Event mask to trigger interrupt;
22189 + * each bit:
22190 + * 0 = ignore event
22191 + * 1 = consider event for asserting IRQ
22192 + *
22193 + * Every interrupt can have up to 32 causes and the interrupt model supports
22194 + * masking/unmasking each cause independently.
22195 + *
22196 + * Return: '0' on Success; Error code otherwise.
22197 + */
22198 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
22199 + uint32_t cmd_flags,
22200 + uint16_t token,
22201 + uint8_t irq_index,
22202 + uint32_t mask)
22203 +{
22204 + struct dprtc_cmd_set_irq_mask *cmd_params;
22205 + struct mc_command cmd = { 0 };
22206 +
22207 + /* prepare command */
22208 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
22209 + cmd_flags,
22210 + token);
22211 + cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
22212 + cmd_params->mask = cpu_to_le32(mask);
22213 + cmd_params->irq_index = irq_index;
22214 +
22215 + /* send command to mc*/
22216 + return mc_send_command(mc_io, &cmd);
22217 +}
22218 +
22219 +/**
22220 + * dprtc_get_irq_mask() - Get interrupt mask.
22221 + * @mc_io: Pointer to MC portal's I/O object
22222 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22223 + * @token: Token of DPRTC object
22224 + * @irq_index: The interrupt index to configure
22225 + * @mask: Returned event mask to trigger interrupt
22226 + *
22227 + * Every interrupt can have up to 32 causes and the interrupt model supports
22228 + * masking/unmasking each cause independently.
22229 + *
22230 + * Return: '0' on Success; Error code otherwise.
22231 + */
22232 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
22233 + uint32_t cmd_flags,
22234 + uint16_t token,
22235 + uint8_t irq_index,
22236 + uint32_t *mask)
22237 +{
22238 + struct dprtc_rsp_get_irq_mask *rsp_params;
22239 + struct dprtc_cmd_get_irq *cmd_params;
22240 + struct mc_command cmd = { 0 };
22241 + int err;
22242 +
22243 + /* prepare command */
22244 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
22245 + cmd_flags,
22246 + token);
22247 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22248 + cmd_params->irq_index = irq_index;
22249 +
22250 + /* send command to mc*/
22251 + err = mc_send_command(mc_io, &cmd);
22252 + if (err)
22253 + return err;
22254 +
22255 + /* retrieve response parameters */
22256 + rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
22257 + *mask = le32_to_cpu(rsp_params->mask);
22258 +
22259 + return 0;
22260 +}
22261 +
22262 +/**
22263 + * dprtc_get_irq_status() - Get the current status of any pending interrupts.
22264 + *
22265 + * @mc_io: Pointer to MC portal's I/O object
22266 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22267 + * @token: Token of DPRTC object
22268 + * @irq_index: The interrupt index to configure
22269 + * @status: Returned interrupts status - one bit per cause:
22270 + * 0 = no interrupt pending
22271 + * 1 = interrupt pending
22272 + *
22273 + * Return: '0' on Success; Error code otherwise.
22274 + */
22275 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
22276 + uint32_t cmd_flags,
22277 + uint16_t token,
22278 + uint8_t irq_index,
22279 + uint32_t *status)
22280 +{
22281 + struct dprtc_cmd_get_irq_status *cmd_params;
22282 + struct dprtc_rsp_get_irq_status *rsp_params;
22283 + struct mc_command cmd = { 0 };
22284 + int err;
22285 +
22286 + /* prepare command */
22287 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
22288 + cmd_flags,
22289 + token);
22290 + cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
22291 + cmd_params->status = cpu_to_le32(*status);
22292 + cmd_params->irq_index = irq_index;
22293 +
22294 + /* send command to mc*/
22295 + err = mc_send_command(mc_io, &cmd);
22296 + if (err)
22297 + return err;
22298 +
22299 + /* retrieve response parameters */
22300 + rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
22301 + *status = le32_to_cpu(rsp_params->status);
22302 +
22303 + return 0;
22304 +}
22305 +
22306 +/**
22307 + * dprtc_clear_irq_status() - Clear a pending interrupt's status
22308 + *
22309 + * @mc_io: Pointer to MC portal's I/O object
22310 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22311 + * @token: Token of DPRTC object
22312 + * @irq_index: The interrupt index to configure
22313 + * @status: Bits to clear (W1C) - one bit per cause:
22314 + * 0 = don't change
22315 + * 1 = clear status bit
22316 + *
22317 + * Return: '0' on Success; Error code otherwise.
22318 + */
22319 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
22320 + uint32_t cmd_flags,
22321 + uint16_t token,
22322 + uint8_t irq_index,
22323 + uint32_t status)
22324 +{
22325 + struct dprtc_cmd_clear_irq_status *cmd_params;
22326 + struct mc_command cmd = { 0 };
22327 +
22328 + /* prepare command */
22329 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
22330 + cmd_flags,
22331 + token);
22332 + cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
22333 + cmd_params->irq_index = irq_index;
22334 + cmd_params->status = cpu_to_le32(status);
22335 +
22336 + /* send command to mc*/
22337 + return mc_send_command(mc_io, &cmd);
22338 +}
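Taken together, the IRQ helpers above are typically used as sketched below, with DPRTC_IRQ_INDEX and the DPRTC_EVENT_* masks coming from dprtc.h; the helper names are illustrative and 'token' is assumed to come from a prior dprtc_open():

static int dprtc_example_irq_setup(struct fsl_mc_io *mc_io, uint16_t token)
{
	int err;

	/* consider both event causes for asserting the interrupt */
	err = dprtc_set_irq_mask(mc_io, 0, token, DPRTC_IRQ_INDEX,
				 DPRTC_EVENT_ALARM | DPRTC_EVENT_PPS);
	if (err)
		return err;

	return dprtc_set_irq_enable(mc_io, 0, token, DPRTC_IRQ_INDEX, 1);
}

static void dprtc_example_irq_handle(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t status = 0;

	if (dprtc_get_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX, &status))
		return;

	/* ... act on the DPRTC_EVENT_ALARM / DPRTC_EVENT_PPS bits ... */

	dprtc_clear_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX, status);
}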
22339 +
22340 +/**
22341 + * dprtc_get_attributes() - Retrieve DPRTC attributes.
22342 + *
22343 + * @mc_io: Pointer to MC portal's I/O object
22344 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22345 + * @token: Token of DPRTC object
22346 + * @attr: Returned object's attributes
22347 + *
22348 + * Return: '0' on Success; Error code otherwise.
22349 + */
22350 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
22351 + uint32_t cmd_flags,
22352 + uint16_t token,
22353 + struct dprtc_attr *attr)
22354 +{
22355 + struct dprtc_rsp_get_attributes *rsp_params;
22356 + struct mc_command cmd = { 0 };
22357 + int err;
22358 +
22359 + /* prepare command */
22360 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
22361 + cmd_flags,
22362 + token);
22363 +
22364 + /* send command to mc*/
22365 + err = mc_send_command(mc_io, &cmd);
22366 + if (err)
22367 + return err;
22368 +
22369 + /* retrieve response parameters */
22370 + rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
22371 + attr->id = le32_to_cpu(rsp_params->id);
22372 +
22373 + return 0;
22374 +}
22375 +
22376 +/**
22377 + * dprtc_set_clock_offset() - Sets the clock's offset
22378 + * (usually relative to another clock).
22379 + *
22380 + * @mc_io: Pointer to MC portal's I/O object
22381 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22382 + * @token: Token of DPRTC object
22383 + * @offset: New clock offset (in nanoseconds).
22384 + *
22385 + * Return: '0' on Success; Error code otherwise.
22386 + */
22387 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
22388 + uint32_t cmd_flags,
22389 + uint16_t token,
22390 + int64_t offset)
22391 +{
22392 + struct dprtc_cmd_set_clock_offset *cmd_params;
22393 + struct mc_command cmd = { 0 };
22394 +
22395 + /* prepare command */
22396 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
22397 + cmd_flags,
22398 + token);
22399 + cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
22400 + cmd_params->offset = cpu_to_le64(offset);
22401 +
22402 + /* send command to mc*/
22403 + return mc_send_command(mc_io, &cmd);
22404 +}
22405 +
22406 +/**
22407 + * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
22408 + *
22409 + * @mc_io: Pointer to MC portal's I/O object
22410 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22411 + * @token: Token of DPRTC object
22412 + * @freq_compensation: The new frequency compensation value to set.
22413 + *
22414 + * Return: '0' on Success; Error code otherwise.
22415 + */
22416 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
22417 + uint32_t cmd_flags,
22418 + uint16_t token,
22419 + uint32_t freq_compensation)
22420 +{
22421 + struct dprtc_get_freq_compensation *cmd_params;
22422 + struct mc_command cmd = { 0 };
22423 +
22424 + /* prepare command */
22425 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
22426 + cmd_flags,
22427 + token);
22428 + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
22429 + cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
22430 +
22431 + /* send command to mc*/
22432 + return mc_send_command(mc_io, &cmd);
22433 +}
22434 +
22435 +/**
22436 + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
22437 + *
22438 + * @mc_io: Pointer to MC portal's I/O object
22439 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22440 + * @token: Token of DPRTC object
22441 + * @freq_compensation: Returned frequency compensation value
22442 + *
22443 + * Return: '0' on Success; Error code otherwise.
22444 + */
22445 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
22446 + uint32_t cmd_flags,
22447 + uint16_t token,
22448 + uint32_t *freq_compensation)
22449 +{
22450 + struct dprtc_get_freq_compensation *rsp_params;
22451 + struct mc_command cmd = { 0 };
22452 + int err;
22453 +
22454 + /* prepare command */
22455 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
22456 + cmd_flags,
22457 + token);
22458 +
22459 + /* send command to mc*/
22460 + err = mc_send_command(mc_io, &cmd);
22461 + if (err)
22462 + return err;
22463 +
22464 + /* retrieve response parameters */
22465 + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
22466 + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
22467 +
22468 + return 0;
22469 +}
22470 +
22471 +/**
22472 + * dprtc_get_time() - Returns the current RTC time.
22473 + *
22474 + * @mc_io: Pointer to MC portal's I/O object
22475 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22476 + * @token: Token of DPRTC object
22477 + * @time: Current RTC time.
22478 + *
22479 + * Return: '0' on Success; Error code otherwise.
22480 + */
22481 +int dprtc_get_time(struct fsl_mc_io *mc_io,
22482 + uint32_t cmd_flags,
22483 + uint16_t token,
22484 + uint64_t *time)
22485 +{
22486 + struct dprtc_time *rsp_params;
22487 + struct mc_command cmd = { 0 };
22488 + int err;
22489 +
22490 + /* prepare command */
22491 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
22492 + cmd_flags,
22493 + token);
22494 +
22495 + /* send command to mc*/
22496 + err = mc_send_command(mc_io, &cmd);
22497 + if (err)
22498 + return err;
22499 +
22500 + /* retrieve response parameters */
22501 + rsp_params = (struct dprtc_time *)cmd.params;
22502 + *time = le64_to_cpu(rsp_params->time);
22503 +
22504 + return 0;
22505 +}
22506 +
22507 +/**
22508 + * dprtc_set_time() - Updates current RTC time.
22509 + *
22510 + * @mc_io: Pointer to MC portal's I/O object
22511 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22512 + * @token: Token of DPRTC object
22513 + * @time: New RTC time.
22514 + *
22515 + * Return: '0' on Success; Error code otherwise.
22516 + */
22517 +int dprtc_set_time(struct fsl_mc_io *mc_io,
22518 + uint32_t cmd_flags,
22519 + uint16_t token,
22520 + uint64_t time)
22521 +{
22522 + struct dprtc_time *cmd_params;
22523 + struct mc_command cmd = { 0 };
22524 +
22525 + /* prepare command */
22526 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
22527 + cmd_flags,
22528 + token);
22529 + cmd_params = (struct dprtc_time *)cmd.params;
22530 + cmd_params->time = cpu_to_le64(time);
22531 +
22532 + /* send command to mc*/
22533 + return mc_send_command(mc_io, &cmd);
22534 +}
22535 +
22536 +/**
22537 + * dprtc_set_alarm() - Defines and sets alarm.
22538 + *
22539 + * @mc_io: Pointer to MC portal's I/O object
22540 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22541 + * @token: Token of DPRTC object
22542 + * @time: In nanoseconds, the time when the alarm
22543 + * should go off - must be a multiple of
22544 + * 1 microsecond
22545 + *
22546 + * Return: '0' on Success; Error code otherwise.
22547 + */
22548 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
22549 + uint32_t cmd_flags,
22550 + uint16_t token, uint64_t time)
22551 +{
22552 + struct dprtc_time *cmd_params;
22553 + struct mc_command cmd = { 0 };
22554 +
22555 + /* prepare command */
22556 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
22557 + cmd_flags,
22558 + token);
22559 + cmd_params = (struct dprtc_time *)cmd.params;
22560 + cmd_params->time = cpu_to_le64(time);
22561 +
22562 + /* send command to mc*/
22563 + return mc_send_command(mc_io, &cmd);
22564 +}
22565 +
22566 +/**
22567 + * dprtc_get_api_version() - Get Data Path Real Time Counter API version
22568 + * @mc_io: Pointer to MC portal's I/O object
22569 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22570 + * @major_ver: Major version of data path real time counter API
22571 + * @minor_ver: Minor version of data path real time counter API
22572 + *
22573 + * Return: '0' on Success; Error code otherwise.
22574 + */
22575 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
22576 + uint32_t cmd_flags,
22577 + uint16_t *major_ver,
22578 + uint16_t *minor_ver)
22579 +{
22580 + struct dprtc_rsp_get_api_version *rsp_params;
22581 + struct mc_command cmd = { 0 };
22582 + int err;
22583 +
22584 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
22585 + cmd_flags,
22586 + 0);
22587 +
22588 + err = mc_send_command(mc_io, &cmd);
22589 + if (err)
22590 + return err;
22591 +
22592 + rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
22593 + *major_ver = le16_to_cpu(rsp_params->major);
22594 + *minor_ver = le16_to_cpu(rsp_params->minor);
22595 +
22596 + return 0;
22597 +}
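A short sketch of how a caller might sanity-check the firmware API against the DPRTC_VER_MAJOR/DPRTC_VER_MINOR constants from dprtc-cmd.h before issuing further commands; the helper name and the choice of error code are illustrative:

static int dprtc_example_check_api(struct fsl_mc_io *mc_io)
{
	uint16_t major, minor;
	int err;

	err = dprtc_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	/* only the major number is assumed to gate compatibility here */
	if (major != DPRTC_VER_MAJOR)
		return -ENODEV;

	return 0;
}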
22598 diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.h b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
22599 new file mode 100644
22600 index 00000000..fc96cac6
22601 --- /dev/null
22602 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
22603 @@ -0,0 +1,172 @@
22604 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
22605 + *
22606 + * Redistribution and use in source and binary forms, with or without
22607 + * modification, are permitted provided that the following conditions are met:
22608 + * * Redistributions of source code must retain the above copyright
22609 + * notice, this list of conditions and the following disclaimer.
22610 + * * Redistributions in binary form must reproduce the above copyright
22611 + * notice, this list of conditions and the following disclaimer in the
22612 + * documentation and/or other materials provided with the distribution.
22613 + * * Neither the name of the above-listed copyright holders nor the
22614 + * names of any contributors may be used to endorse or promote products
22615 + * derived from this software without specific prior written permission.
22616 + *
22617 + *
22618 + * ALTERNATIVELY, this software may be distributed under the terms of the
22619 + * GNU General Public License ("GPL") as published by the Free Software
22620 + * Foundation, either version 2 of that License or (at your option) any
22621 + * later version.
22622 + *
22623 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22624 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22625 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22626 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22627 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22628 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22629 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22630 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22631 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22632 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22633 + * POSSIBILITY OF SUCH DAMAGE.
22634 + */
22635 +#ifndef __FSL_DPRTC_H
22636 +#define __FSL_DPRTC_H
22637 +
22638 +/* Data Path Real Time Counter API
22639 + * Contains initialization APIs and runtime control APIs for RTC
22640 + */
22641 +
22642 +struct fsl_mc_io;
22643 +
22644 +/**
22645 + * Number of IRQs
22646 + */
22647 +#define DPRTC_MAX_IRQ_NUM 1
22648 +#define DPRTC_IRQ_INDEX 0
22649 +
22650 +/**
22651 + * Interrupt event masks:
22652 + */
22653 +
22654 +/**
22655 + * Interrupt event mask indicating an alarm event has occurred
22656 + */
22657 +#define DPRTC_EVENT_ALARM 0x40000000
22658 +/**
22659 + * Interrupt event mask indicating a periodic pulse event has occurred
22660 + */
22661 +#define DPRTC_EVENT_PPS 0x08000000
22662 +
22663 +int dprtc_open(struct fsl_mc_io *mc_io,
22664 + uint32_t cmd_flags,
22665 + int dprtc_id,
22666 + uint16_t *token);
22667 +
22668 +int dprtc_close(struct fsl_mc_io *mc_io,
22669 + uint32_t cmd_flags,
22670 + uint16_t token);
22671 +
22672 +/**
22673 + * struct dprtc_cfg - Structure representing DPRTC configuration
22674 + * @options: placeholder
22675 + */
22676 +struct dprtc_cfg {
22677 + uint32_t options;
22678 +};
22679 +
22680 +int dprtc_create(struct fsl_mc_io *mc_io,
22681 + uint16_t dprc_token,
22682 + uint32_t cmd_flags,
22683 + const struct dprtc_cfg *cfg,
22684 + uint32_t *obj_id);
22685 +
22686 +int dprtc_destroy(struct fsl_mc_io *mc_io,
22687 + uint16_t dprc_token,
22688 + uint32_t cmd_flags,
22689 + uint32_t object_id);
22690 +
22691 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
22692 + uint32_t cmd_flags,
22693 + uint16_t token,
22694 + int64_t offset);
22695 +
22696 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
22697 + uint32_t cmd_flags,
22698 + uint16_t token,
22699 + uint32_t freq_compensation);
22700 +
22701 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
22702 + uint32_t cmd_flags,
22703 + uint16_t token,
22704 + uint32_t *freq_compensation);
22705 +
22706 +int dprtc_get_time(struct fsl_mc_io *mc_io,
22707 + uint32_t cmd_flags,
22708 + uint16_t token,
22709 + uint64_t *time);
22710 +
22711 +int dprtc_set_time(struct fsl_mc_io *mc_io,
22712 + uint32_t cmd_flags,
22713 + uint16_t token,
22714 + uint64_t time);
22715 +
22716 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
22717 + uint32_t cmd_flags,
22718 + uint16_t token,
22719 + uint64_t time);
22720 +
22721 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
22722 + uint32_t cmd_flags,
22723 + uint16_t token,
22724 + uint8_t irq_index,
22725 + uint8_t en);
22726 +
22727 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
22728 + uint32_t cmd_flags,
22729 + uint16_t token,
22730 + uint8_t irq_index,
22731 + uint8_t *en);
22732 +
22733 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
22734 + uint32_t cmd_flags,
22735 + uint16_t token,
22736 + uint8_t irq_index,
22737 + uint32_t mask);
22738 +
22739 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
22740 + uint32_t cmd_flags,
22741 + uint16_t token,
22742 + uint8_t irq_index,
22743 + uint32_t *mask);
22744 +
22745 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
22746 + uint32_t cmd_flags,
22747 + uint16_t token,
22748 + uint8_t irq_index,
22749 + uint32_t *status);
22750 +
22751 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
22752 + uint32_t cmd_flags,
22753 + uint16_t token,
22754 + uint8_t irq_index,
22755 + uint32_t status);
22756 +
22757 +/**
22758 + * struct dprtc_attr - Structure representing DPRTC attributes
22759 + * @id: DPRTC object ID
22760 + */
22761 +struct dprtc_attr {
22762 + int id;
22763 +};
22764 +
22765 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
22766 + uint32_t cmd_flags,
22767 + uint16_t token,
22768 + struct dprtc_attr *attr);
22769 +
22770 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
22771 + uint32_t cmd_flags,
22772 + uint16_t *major_ver,
22773 + uint16_t *minor_ver);
22774 +
22775 +#endif /* __FSL_DPRTC_H */
22776 diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/staging/fsl-dpaa2/rtc/rtc.c
22777 new file mode 100644
22778 index 00000000..0afc6538
22779 --- /dev/null
22780 +++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
22781 @@ -0,0 +1,243 @@
22782 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
22783 + *
22784 + * Redistribution and use in source and binary forms, with or without
22785 + * modification, are permitted provided that the following conditions are met:
22786 + * * Redistributions of source code must retain the above copyright
22787 + * notice, this list of conditions and the following disclaimer.
22788 + * * Redistributions in binary form must reproduce the above copyright
22789 + * notice, this list of conditions and the following disclaimer in the
22790 + * documentation and/or other materials provided with the distribution.
22791 + * * Neither the name of the above-listed copyright holders nor the
22792 + * names of any contributors may be used to endorse or promote products
22793 + * derived from this software without specific prior written permission.
22794 + *
22795 + *
22796 + * ALTERNATIVELY, this software may be distributed under the terms of the
22797 + * GNU General Public License ("GPL") as published by the Free Software
22798 + * Foundation, either version 2 of that License or (at your option) any
22799 + * later version.
22800 + *
22801 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22802 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22803 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22804 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22805 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22806 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22807 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22808 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22809 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22810 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22811 + * POSSIBILITY OF SUCH DAMAGE.
22812 + */
22813 +
22814 +#include <linux/module.h>
22815 +#include <linux/ptp_clock_kernel.h>
22816 +
22817 +#include "../../fsl-mc/include/mc.h"
22818 +#include "../../fsl-mc/include/mc-sys.h"
22819 +
22820 +#include "dprtc.h"
22821 +#include "dprtc-cmd.h"
22822 +
22823 +#define N_EXT_TS 2
22824 +
22825 +static struct ptp_clock *clock;
22826 +static struct fsl_mc_device *rtc_mc_dev;
22827 +static u32 freqCompensation;
22828 +
22829 +/* PTP clock operations */
22830 +static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
22831 +{
22832 + u64 adj;
22833 + u32 diff, tmr_add;
22834 + int neg_adj = 0;
22835 + int err = 0;
22836 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22837 + struct device *dev = &mc_dev->dev;
22838 +
22839 + if (ppb < 0) {
22840 + neg_adj = 1;
22841 + ppb = -ppb;
22842 + }
22843 +
22844 + tmr_add = freqCompensation;
22845 + adj = tmr_add;
22846 + adj *= ppb;
22847 + diff = div_u64(adj, 1000000000ULL);
22848 +
22849 + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
22850 +
22851 + err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
22852 + mc_dev->mc_handle, tmr_add);
22853 + if (err)
22854 + dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
22855 + return 0;
22856 +}
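With hypothetical numbers, the scaling above works out as follows, i.e. the compensation word ends up scaled by roughly (1 + ppb / 1e9):

	tmr_add = 0x40000000;           /* 1073741824, as read at probe time */
	adj     = 1073741824ULL * 1000; /* requested adjustment: +1000 ppb   */
	diff    = adj / 1000000000ULL;  /* = 1073                            */
	tmr_add = 1073741824 + 1073;    /* = 1073742897, written back to MC  */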
22857 +
22858 +static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
22859 +{
22860 + u64 now;
22861 + int err = 0;
22862 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22863 + struct device *dev = &mc_dev->dev;
22864 +
22865 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
22866 + if (err) {
22867 + dev_err(dev, "dprtc_get_time err %d\n", err);
22868 + return 0;
22869 + }
22870 +
22871 + now += delta;
22872 +
22873 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
22874 + if (err) {
22875 + dev_err(dev, "dprtc_set_time err %d\n", err);
22876 + return 0;
22877 + }
22878 + return 0;
22879 +}
22880 +
22881 +static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
22882 +{
22883 + u64 ns;
22884 + u32 remainder;
22885 + int err = 0;
22886 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22887 + struct device *dev = &mc_dev->dev;
22888 +
22889 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
22890 + if (err) {
22891 + dev_err(dev, "dprtc_get_time err %d\n", err);
22892 + return 0;
22893 + }
22894 +
22895 + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
22896 + ts->tv_nsec = remainder;
22897 + return 0;
22898 +}
22899 +
22900 +static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
22901 + const struct timespec *ts)
22902 +{
22903 + u64 ns;
22904 + int err = 0;
22905 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22906 + struct device *dev = &mc_dev->dev;
22907 +
22908 + ns = ts->tv_sec * 1000000000ULL;
22909 + ns += ts->tv_nsec;
22910 +
22911 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
22912 + if (err)
22913 + dev_err(dev, "dprtc_set_time err %d\n", err);
22914 + return 0;
22915 +}
22916 +
22917 +static struct ptp_clock_info ptp_dpaa2_caps = {
22918 + .owner = THIS_MODULE,
22919 + .name = "dpaa2 clock",
22920 + .max_adj = 512000,
22921 + .n_alarm = 0,
22922 + .n_ext_ts = N_EXT_TS,
22923 + .n_per_out = 0,
22924 + .n_pins = 0,
22925 + .pps = 1,
22926 + .adjfreq = ptp_dpaa2_adjfreq,
22927 + .adjtime = ptp_dpaa2_adjtime,
22928 + .gettime64 = ptp_dpaa2_gettime,
22929 + .settime64 = ptp_dpaa2_settime,
22930 +};
22931 +
22932 +static int rtc_probe(struct fsl_mc_device *mc_dev)
22933 +{
22934 + struct device *dev;
22935 + int err = 0;
22936 + int dpaa2_phc_index;
22937 + u32 tmr_add = 0;
22938 +
22939 + if (!mc_dev)
22940 + return -EFAULT;
22941 +
22942 + dev = &mc_dev->dev;
22943 +
22944 + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
22945 + if (unlikely(err)) {
22946 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
22947 + goto err_exit;
22948 + }
22949 + if (!mc_dev->mc_io) {
22950 + dev_err(dev,
22951 + "fsl_mc_portal_allocate returned null handle but no error\n");
22952 + err = -EFAULT;
22953 + goto err_exit;
22954 + }
22955 +
22956 + err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
22957 + &mc_dev->mc_handle);
22958 + if (err) {
22959 + dev_err(dev, "dprtc_open err %d\n", err);
22960 + goto err_free_mcp;
22961 + }
22962 + if (!mc_dev->mc_handle) {
22963 + dev_err(dev, "dprtc_open returned null handle but no error\n");
22964 + err = -EFAULT;
22965 + goto err_free_mcp;
22966 + }
22967 +
22968 + rtc_mc_dev = mc_dev;
22969 +
22970 + err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
22971 + mc_dev->mc_handle, &tmr_add);
22972 + if (err) {
22973 + dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
22974 + goto err_close;
22975 + }
22976 + freqCompensation = tmr_add;
22977 +
22978 + clock = ptp_clock_register(&ptp_dpaa2_caps, dev);
22979 + if (IS_ERR(clock)) {
22980 + err = PTR_ERR(clock);
22981 + goto err_close;
22982 + }
22983 + dpaa2_phc_index = ptp_clock_index(clock);
22984 +
22985 + return 0;
22986 +err_close:
22987 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
22988 +err_free_mcp:
22989 + fsl_mc_portal_free(mc_dev->mc_io);
22990 +err_exit:
22991 + return err;
22992 +}
22993 +
22994 +static int rtc_remove(struct fsl_mc_device *mc_dev)
22995 +{
22996 + ptp_clock_unregister(clock);
22997 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
22998 + fsl_mc_portal_free(mc_dev->mc_io);
22999 +
23000 + return 0;
23001 +}
23002 +
23003 +static const struct fsl_mc_device_id rtc_match_id_table[] = {
23004 + {
23005 + .vendor = FSL_MC_VENDOR_FREESCALE,
23006 + .obj_type = "dprtc",
23007 + },
23008 + {}
23009 +};
23010 +
23011 +static struct fsl_mc_driver rtc_drv = {
23012 + .driver = {
23013 + .name = KBUILD_MODNAME,
23014 + .owner = THIS_MODULE,
23015 + },
23016 + .probe = rtc_probe,
23017 + .remove = rtc_remove,
23018 + .match_id_table = rtc_match_id_table,
23019 +};
23020 +
23021 +module_fsl_mc_driver(rtc_drv);
23022 +
23023 +MODULE_LICENSE("GPL");
23024 +MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");
23025 --
23026 2.14.1
23027