layerscape: refresh patches
[openwrt/staging/dedeckeh.git] target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
1 From 3a302437605308079db398b67000a77a4fe92da8 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 12:07:58 +0800
4 Subject: [PATCH] dpaa2: support layerscape
5
6 This is an integrated patch for Layerscape DPAA2 support.
7
8 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
9 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
10 Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
11 Signed-off-by: costi <constantin.tudor@freescale.com>
12 Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
13 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
14 ---
15 drivers/soc/fsl/ls2-console/Kconfig | 4 +
16 drivers/soc/fsl/ls2-console/Makefile | 1 +
17 drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
18 drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 +
19 drivers/staging/fsl-dpaa2/ethernet/README | 186 ++
20 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 350 +++
21 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
22 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 ++
23 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3155 ++++++++++++++++++++
24 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 460 +++
25 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 856 ++++++
26 drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 ++
27 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 600 ++++
28 drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1770 +++++++++++
29 drivers/staging/fsl-dpaa2/ethernet/dpni.h | 989 ++++++
30 drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
31 drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 +
32 drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
33 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 ++++++
34 drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++++
35 drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 ++++++++
36 drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 ++++++++++++
37 drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
38 drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
39 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
40 drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++
41 drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++
42 drivers/staging/fsl-dpaa2/evb/evb.c | 1350 +++++++++
43 drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
44 drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
45 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 ++
46 drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++
47 drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 +++
48 drivers/staging/fsl-dpaa2/mac/mac.c | 666 +++++
49 drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
50 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
51 drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++
52 drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 ++
53 drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++
54 39 files changed, 22696 insertions(+)
55 create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
56 create mode 100644 drivers/soc/fsl/ls2-console/Makefile
57 create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
58 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
59 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README
60 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
61 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
62 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
63 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
64 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
65 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
66 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h
67 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
68 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
69 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
70 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h
71 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig
72 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
73 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
74 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
75 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
76 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c
77 create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
78 create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
79 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
80 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c
81 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h
82 create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c
83 create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
84 create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
85 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
86 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
87 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
88 create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
89 create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile
90 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
91 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c
92 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h
93 create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c
94
95 --- /dev/null
96 +++ b/drivers/soc/fsl/ls2-console/Kconfig
97 @@ -0,0 +1,4 @@
98 +config FSL_LS2_CONSOLE
99 + tristate "Layerscape MC and AIOP console support"
100 + depends on ARCH_LAYERSCAPE
101 + default y
102 --- /dev/null
103 +++ b/drivers/soc/fsl/ls2-console/Makefile
104 @@ -0,0 +1 @@
105 +obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o
106 --- /dev/null
107 +++ b/drivers/soc/fsl/ls2-console/ls2-console.c
108 @@ -0,0 +1,284 @@
109 +/* Copyright 2015-2016 Freescale Semiconductor Inc.
110 + *
111 + * Redistribution and use in source and binary forms, with or without
112 + * modification, are permitted provided that the following conditions are met:
113 + * * Redistributions of source code must retain the above copyright
114 + * notice, this list of conditions and the following disclaimer.
115 + * * Redistributions in binary form must reproduce the above copyright
116 + * notice, this list of conditions and the following disclaimer in the
117 + * documentation and/or other materials provided with the distribution.
118 + * * Neither the name of the above-listed copyright holders nor the
119 + * names of any contributors may be used to endorse or promote products
120 + * derived from this software without specific prior written permission.
121 + *
122 + *
123 + * ALTERNATIVELY, this software may be distributed under the terms of the
124 + * GNU General Public License ("GPL") as published by the Free Software
125 + * Foundation, either version 2 of that License or (at your option) any
126 + * later version.
127 + *
128 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
129 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
130 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
131 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
132 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
133 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
134 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
135 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
136 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
137 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
138 + * POSSIBILITY OF SUCH DAMAGE.
139 + */
140 +
141 +#include <linux/miscdevice.h>
142 +#include <linux/uaccess.h>
143 +#include <linux/poll.h>
144 +#include <linux/compat.h>
145 +#include <linux/module.h>
146 +#include <linux/slab.h>
147 +#include <linux/io.h>
148 +
149 +/* SoC address for the MC firmware base low/high registers */
150 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020
151 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2
152 +/* MC firmware base low/high registers indexes */
153 +#define MCFBALR_OFFSET 0
154 +#define MCFBAHR_OFFSET 1
155 +
156 +/* Bit mask used to obtain the most significant part of the MC base address */
157 +#define MC_FW_HIGH_ADDR_MASK 0x1FFFF
158 +/* Bit mask used to obtain the least significant part of the MC base address */
159 +#define MC_FW_LOW_ADDR_MASK 0xE0000000
160 +
161 +#define MC_BUFFER_OFFSET 0x01000000
162 +#define MC_BUFFER_SIZE (1024*1024*16)
163 +#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET)
164 +
165 +#define AIOP_BUFFER_OFFSET 0x06000000
166 +#define AIOP_BUFFER_SIZE (1024*1024*16)
167 +#define AIOP_OFFSET_DELTA (0)
168 +
169 +struct log_header {
170 + char magic_word[8]; /* magic word */
171 + uint32_t buf_start; /* holds the 32-bit little-endian
172 + * offset of the start of the buffer
173 + */
174 + uint32_t buf_length; /* holds the 32-bit little-endian
175 + * length of the buffer
176 + */
177 + uint32_t last_byte; /* holds the 32-bit little-endian offset
178 + * of the byte after the last byte that
179 + * was written
180 + */
181 + char reserved[44];
182 +};
183 +
184 +#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
185 +#define LOG_VERSION_MAJOR 1
186 +#define LOG_VERSION_MINOR 0
187 +
188 +
189 +#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); }
190 +
191 +struct console_data {
192 + char *map_addr;
193 + struct log_header *hdr;
194 + char *start_addr; /* Start of buffer */
195 + char *end_addr; /* End of buffer */
196 + char *end_of_data; /* Current end of data */
197 + char *cur_ptr; /* Last data sent to console */
198 +};
199 +
200 +#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
201 +
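+/* Layout recap, derived from the header fields above: the log data region
+ * starts at map_addr + buf_start - offset_delta and spans buf_length bytes.
+ * last_byte holds the write offset into that region, with its most
+ * significant bit doubling as a wraparound flag, hence the LAST_BYTE() mask.
+ */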
202 +static inline void __adjust_end(struct console_data *cd)
203 +{
204 + cd->end_of_data = cd->start_addr
205 + + LAST_BYTE(le32_to_cpu(cd->hdr->last_byte));
206 +}
207 +
208 +static inline void adjust_end(struct console_data *cd)
209 +{
210 + invalidate(cd->hdr);
211 + __adjust_end(cd);
212 +}
213 +
214 +static inline uint64_t get_mc_fw_base_address(void)
215 +{
216 + u32 *mcfbaregs = (u32 *) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS,
217 + SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE);
218 + u64 mcfwbase = 0ULL;
219 +
220 + mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK;
221 + mcfwbase <<= 32;
222 + mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK;
223 + iounmap(mcfbaregs);
224 + pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase);
225 + return mcfwbase;
226 +}
227 +
228 +static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp,
229 + u64 offset, u64 size,
230 + uint8_t *emagic, uint8_t magic_len,
231 + u32 offset_delta)
232 +{
233 + struct console_data *cd;
234 + uint8_t *magic;
235 + uint32_t wrapped;
236 +
237 + cd = kmalloc(sizeof(*cd), GFP_KERNEL);
238 + if (cd == NULL)
239 + return -ENOMEM;
240 + fp->private_data = cd;
241 + cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size);
242 +
243 + cd->hdr = (struct log_header *) cd->map_addr;
244 + invalidate(cd->hdr);
245 +
246 + magic = cd->hdr->magic_word;
247 + if (memcmp(magic, emagic, magic_len)) {
248 + pr_info("magic didn't match!\n");
249 + pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n",
250 + emagic[0], emagic[1], emagic[2], emagic[3],
251 + emagic[4], emagic[5], emagic[6], emagic[7]);
252 + pr_info(" seen: %02x %02x %02x %02x %02x %02x %02x %02x\n",
253 + magic[0], magic[1], magic[2], magic[3],
254 + magic[4], magic[5], magic[6], magic[7]);
255 +		iounmap(cd->map_addr);
256 +		kfree(cd);
257 + return -EIO;
258 + }
259 +
260 + cd->start_addr = cd->map_addr
261 + + le32_to_cpu(cd->hdr->buf_start) - offset_delta;
262 + cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length);
263 +
264 + wrapped = le32_to_cpu(cd->hdr->last_byte)
265 + & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
266 +
267 + __adjust_end(cd);
268 + if (wrapped && (cd->end_of_data != cd->end_addr))
269 + cd->cur_ptr = cd->end_of_data+1;
270 + else
271 + cd->cur_ptr = cd->start_addr;
272 +
273 + return 0;
274 +}
275 +
276 +static int fsl_ls2_mc_console_open(struct inode *node, struct file *fp)
277 +{
278 + uint8_t magic_word[] = { 0, 1, 'C', 'M' };
279 +
280 + return fsl_ls2_generic_console_open(node, fp,
281 + MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
282 + magic_word, sizeof(magic_word),
283 + MC_OFFSET_DELTA);
284 +}
285 +
286 +static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp)
287 +{
288 + uint8_t magic_word[] = { 'P', 'O', 'I', 'A' };
289 +
290 + return fsl_ls2_generic_console_open(node, fp,
291 + AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
292 + magic_word, sizeof(magic_word),
293 + AIOP_OFFSET_DELTA);
294 +}
295 +
296 +static int fsl_ls2_console_close(struct inode *node, struct file *fp)
297 +{
298 + struct console_data *cd = fp->private_data;
299 +
300 + iounmap(cd->map_addr);
301 + kfree(cd);
302 + return 0;
303 +}
304 +
305 +ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf, size_t count,
306 + loff_t *f_pos)
307 +{
308 + struct console_data *cd = fp->private_data;
309 + size_t bytes = 0;
310 + char data;
311 +
312 + /* Check if we need to adjust the end of data addr */
313 + adjust_end(cd);
314 +
315 + while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) {
316 + if (((u64)cd->cur_ptr) % 64 == 0)
317 + invalidate(cd->cur_ptr);
318 +
319 + data = *(cd->cur_ptr);
320 + if (copy_to_user(&buf[bytes], &data, 1))
321 + return -EFAULT;
322 + cd->cur_ptr++;
323 + if (cd->cur_ptr >= cd->end_addr)
324 + cd->cur_ptr = cd->start_addr;
325 + ++bytes;
326 + }
327 + return bytes;
328 +}
329 +
330 +static const struct file_operations fsl_ls2_mc_console_fops = {
331 + .owner = THIS_MODULE,
332 + .open = fsl_ls2_mc_console_open,
333 + .release = fsl_ls2_console_close,
334 + .read = fsl_ls2_console_read,
335 +};
336 +
337 +static struct miscdevice fsl_ls2_mc_console_dev = {
338 + .minor = MISC_DYNAMIC_MINOR,
339 + .name = "fsl_mc_console",
340 + .fops = &fsl_ls2_mc_console_fops
341 +};
342 +
343 +static const struct file_operations fsl_ls2_aiop_console_fops = {
344 + .owner = THIS_MODULE,
345 + .open = fsl_ls2_aiop_console_open,
346 + .release = fsl_ls2_console_close,
347 + .read = fsl_ls2_console_read,
348 +};
349 +
350 +static struct miscdevice fsl_ls2_aiop_console_dev = {
351 + .minor = MISC_DYNAMIC_MINOR,
352 + .name = "fsl_aiop_console",
353 + .fops = &fsl_ls2_aiop_console_fops
354 +};
355 +
356 +static int __init fsl_ls2_console_init(void)
357 +{
358 + int err = 0;
359 +
360 + pr_info("Freescale LS2 console driver\n");
361 + err = misc_register(&fsl_ls2_mc_console_dev);
362 + if (err) {
363 + pr_err("fsl_mc_console: cannot register device\n");
364 + return err;
365 + }
366 + pr_info("fsl-ls2-console: device %s registered\n",
367 + fsl_ls2_mc_console_dev.name);
368 +
369 + err = misc_register(&fsl_ls2_aiop_console_dev);
370 + if (err) {
371 + pr_err("fsl_aiop_console: cannot register device\n");
372 + return err;
373 + }
374 + pr_info("fsl-ls2-console: device %s registered\n",
375 + fsl_ls2_aiop_console_dev.name);
376 +
377 + return 0;
378 +}
379 +
380 +static void __exit fsl_ls2_console_exit(void)
381 +{
382 + misc_deregister(&fsl_ls2_mc_console_dev);
383 +
384 + misc_deregister(&fsl_ls2_aiop_console_dev);
385 +}
386 +
387 +module_init(fsl_ls2_console_init);
388 +module_exit(fsl_ls2_console_exit);
389 +
390 +MODULE_AUTHOR("Roy Pledge <roy.pledge@freescale.com>");
391 +MODULE_LICENSE("Dual BSD/GPL");
392 +MODULE_DESCRIPTION("Freescale LS2 console driver");
393 --- /dev/null
394 +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
395 @@ -0,0 +1,11 @@
396 +#
397 +# Makefile for the Freescale DPAA2 Ethernet controller
398 +#
399 +
400 +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
401 +
402 +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
403 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
404 +
405 +# Needed by the tracing framework
406 +CFLAGS_dpaa2-eth.o := -I$(src)
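+# (dpaa2-eth-trace.h sets TRACE_INCLUDE_PATH to the current directory, so
+# dpaa2-eth.o, which defines CREATE_TRACE_POINTS, must have $(src) on its
+# include path for the generated trace code to find the header)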
407 --- /dev/null
408 +++ b/drivers/staging/fsl-dpaa2/ethernet/README
409 @@ -0,0 +1,186 @@
410 +Freescale DPAA2 Ethernet driver
411 +===============================
412 +
413 +This file provides documentation for the Freescale DPAA2 Ethernet driver.
414 +
415 +
416 +Contents
417 +========
418 + Supported Platforms
419 + Architecture Overview
420 + Creating a Network Interface
421 + Features & Offloads
422 +
423 +
424 +Supported Platforms
425 +===================
426 +This driver provides networking support for Freescale DPAA2 SoCs, e.g.
427 +LS2080A, LS2088A, LS1088A.
428 +
429 +
430 +Architecture Overview
431 +=====================
432 +Unlike regular NICs, in the DPAA2 architecture there is no single hardware block
433 +representing network interfaces; instead, several separate hardware resources
434 +work together to provide the networking functionality:
435 + - network interfaces
436 + - queues, channels
437 + - buffer pools
438 + - MAC/PHY
439 +
440 +All hardware resources are allocated and configured through the Management
441 +Complex (MC) portals. MC abstracts most of these resources as DPAA2 objects
442 +and exposes ABIs through which they can be configured and controlled. A few
443 +hardware resources, like queues, do not have a corresponding MC object and
444 +are treated as internal resources of other objects.
445 +
446 +For a more detailed description of the DPAA2 architecture and its object
447 +abstractions see:
448 + drivers/staging/fsl-mc/README.txt
449 +
450 +Each Linux net device is built on top of a Datapath Network Interface (DPNI)
451 +object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
452 +(DPCONs).
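+
+As an illustration of these command-style ABIs, the DPNI object added by this
+patch is driven through a small set of helpers (dpni_open(), dpni_close() and
+friends) issued over an MC portal. The sketch below is illustrative only: it
+assumes a caller-owned struct fsl_mc_io portal (normally obtained with
+fsl_mc_portal_allocate()) and uses a hypothetical example_* helper name.
+
+	static int example_use_dpni(struct fsl_mc_io *mc_io, int dpni_id)
+	{
+		u16 token;
+		int err;
+
+		err = dpni_open(mc_io, 0, dpni_id, &token);
+		if (err)
+			return err;
+
+		/* ...configure the object with further dpni_*() commands... */
+
+		return dpni_close(mc_io, 0, token);
+	}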
453 +
454 +Configuration interface:
455 +
456 + -----------------------
457 + | DPAA2 Ethernet Driver |
458 + -----------------------
459 + . . .
460 + . . .
461 + . . . . . . . . . . . .
462 + . . .
463 + . . .
464 + ---------- ---------- -----------
465 + | DPBP API | | DPNI API | | DPCON API |
466 + ---------- ---------- -----------
467 + . . . software
468 +=========== . ========== . ============ . ===================
469 + . . . hardware
470 + ------------------------------------------
471 + | MC hardware portals |
472 + ------------------------------------------
473 + . . .
474 + . . .
475 + ------ ------ -------
476 + | DPBP | | DPNI | | DPCON |
477 + ------ ------ -------
478 +
479 +The DPNIs are network interfaces without a direct one-to-one mapping to PHYs.
480 +DPBPs represent hardware buffer pools. Packet I/O is performed in the context
481 +of DPCON objects, using DPIO portals for managing and communicating with the
482 +hardware resources.
483 +
484 +Datapath (I/O) interface:
485 +
486 + -----------------------------------------------
487 + | DPAA2 Ethernet Driver |
488 + -----------------------------------------------
489 + | ^ ^ | |
490 + | | | | |
491 + enqueue| dequeue| data | dequeue| seed |
492 + (Tx) | (Rx, TxC)| avail.| request| buffers|
493 + | | notify| | |
494 + | | | | |
495 + V | | V V
496 + -----------------------------------------------
497 + | DPIO Driver |
498 + -----------------------------------------------
499 + | | | | | software
500 + | | | | | ================
501 + | | | | | hardware
502 + -----------------------------------------------
503 + | I/O hardware portals |
504 + -----------------------------------------------
505 + | ^ ^ | |
506 + | | | | |
507 + | | | V |
508 + V | ================ V
509 + ---------------------- | -------------
510 + queues ---------------------- | | Buffer pool |
511 + ---------------------- | -------------
512 + =======================
513 + Channel
514 +
515 +Datapath I/O (DPIO) portals provide enqueue and dequeue services, data
516 +availability notifications and buffer pool management. DPIOs are shared between
517 +all DPAA2 objects (and implicitly all DPAA2 kernel drivers) that work with data
518 +frames, but must be affine to the CPUs for the purpose of traffic distribution.
519 +
520 +Frames are transmitted and received through hardware frame queues, which can be
521 +grouped in channels for the purpose of hardware scheduling. The Ethernet driver
522 +enqueues TX frames on egress queues and after transmission is complete a TX
523 +confirmation frame is sent back to the CPU.
524 +
525 +When frames are available on ingress queues, a data availability notification
526 +is sent to the CPU; notifications are raised per channel, so even if multiple
527 +queues in the same channel have available frames, only one notification is sent.
528 +After a channel fires a notification, it must be explicitly rearmed.
529 +
530 +Each network interface can have multiple Rx, Tx and confirmation queues affined
531 +to CPUs, and one channel (DPCON) for each CPU that services at least one queue.
532 +DPCONs are used to distribute ingress traffic to different CPUs via the cores'
533 +affine DPIOs.
534 +
535 +The role of hardware buffer pools is storage of ingress frame data. Each network
536 +interface has a privately owned buffer pool which it seeds with kernel allocated
537 +buffers.
538 +
539 +
540 +DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a DPMAC
541 +object or to another DPNI through an internal link, but the connection is
542 +managed by MC and completely transparent to the Ethernet driver.
543 +
544 + --------- --------- ---------
545 + | eth if1 | | eth if2 | | eth ifn |
546 + --------- --------- ---------
547 + . . .
548 + . . .
549 + . . .
550 + ---------------------------
551 + | DPAA2 Ethernet Driver |
552 + ---------------------------
553 + . . .
554 + . . .
555 + . . .
556 + ------ ------ ------ -------
557 + | DPNI | | DPNI | | DPNI | | DPMAC |----+
558 + ------ ------ ------ ------- |
559 + | | | | |
560 + | | | | -----
561 + =========== ================== | PHY |
562 + -----
563 +
564 +Creating a Network Interface
565 +============================
566 +A net device is created for each DPNI object probed on the MC bus. Each DPNI has
567 +a number of properties which determine the network interface configuration
568 +options and associated hardware resources.
569 +
570 +DPNI objects (and the other DPAA2 objects needed for a network interface) can be
571 +added to a container on the MC bus in one of two ways: statically, through a
572 +Datapath Layout Binary file (DPL) that is parsed by MC at boot time; or created
573 +dynamically at runtime, via the DPAA2 objects APIs.
574 +
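+Once a DPNI shows up in its container, the Ethernet driver binds to it like to
+any other bus device. Below is a minimal sketch of such a binding, assuming the
+staging fsl-mc bus API (struct fsl_mc_driver, module_fsl_mc_driver()) and using
+hypothetical example_* names rather than the driver's actual ones.
+
+	static int example_eth_probe(struct fsl_mc_device *mc_dev)
+	{
+		/* allocate the net_device, open the DPNI, set up queues... */
+		return 0;
+	}
+
+	static int example_eth_remove(struct fsl_mc_device *mc_dev)
+	{
+		/* tear everything down in reverse order */
+		return 0;
+	}
+
+	static const struct fsl_mc_device_id example_match_id_table[] = {
+		{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpni" },
+		{ .vendor = 0x0 }
+	};
+
+	static struct fsl_mc_driver example_eth_driver = {
+		.driver = {
+			.name  = KBUILD_MODNAME,
+			.owner = THIS_MODULE,
+		},
+		.probe          = example_eth_probe,
+		.remove         = example_eth_remove,
+		.match_id_table = example_match_id_table,
+	};
+
+	module_fsl_mc_driver(example_eth_driver);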
575 +
576 +Features & Offloads
577 +===================
578 +Hardware checksum offloading is supported for TCP and UDP over IPv4/6 frames.
579 +The checksum offloads can be independently configured on RX and TX through
580 +ethtool.
581 +
582 +Hardware offload of unicast and multicast MAC filtering is supported on the
583 +ingress path and permanently enabled.
584 +
585 +Scatter-gather frames are supported on both RX and TX paths. On TX, SG support
586 +is configurable via ethtool; on RX it is always enabled.
587 +
588 +The DPAA2 hardware can process jumbo Ethernet frames of up to 10K bytes.
589 +
590 +The Ethernet driver defines a static flow hashing scheme that distributes
591 +traffic based on a 5-tuple key: src IP, dst IP, IP proto, L4 src port,
592 +L4 dst port. No user configuration is supported for now.
593 +
594 +Hardware-specific statistics for the network interface, as well as some
595 +non-standard driver stats, can be consulted through the ethtool -S option.
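+
+As an example of how such a toggle reaches the hardware, an Rx checksum change
+requested through ethtool must eventually be pushed down via the DPNI command
+interface. This is a minimal sketch only: it assumes the dpni_set_offload()
+helper and the DPNI_OFF_RX_L3_CSUM/DPNI_OFF_RX_L4_CSUM selectors (names may
+differ slightly in this driver version) and uses a hypothetical wrapper name.
+
+	static int example_set_rx_csum(struct fsl_mc_io *mc_io, u16 token,
+				       bool enable)
+	{
+		int err;
+
+		err = dpni_set_offload(mc_io, 0, token,
+				       DPNI_OFF_RX_L3_CSUM, enable);
+		if (err)
+			return err;
+
+		return dpni_set_offload(mc_io, 0, token,
+					DPNI_OFF_RX_L4_CSUM, enable);
+	}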
596 --- /dev/null
597 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
598 @@ -0,0 +1,350 @@
599 +
600 +/* Copyright 2015 Freescale Semiconductor Inc.
601 + *
602 + * Redistribution and use in source and binary forms, with or without
603 + * modification, are permitted provided that the following conditions are met:
604 + * * Redistributions of source code must retain the above copyright
605 + * notice, this list of conditions and the following disclaimer.
606 + * * Redistributions in binary form must reproduce the above copyright
607 + * notice, this list of conditions and the following disclaimer in the
608 + * documentation and/or other materials provided with the distribution.
609 + * * Neither the name of Freescale Semiconductor nor the
610 + * names of its contributors may be used to endorse or promote products
611 + * derived from this software without specific prior written permission.
612 + *
613 + *
614 + * ALTERNATIVELY, this software may be distributed under the terms of the
615 + * GNU General Public License ("GPL") as published by the Free Software
616 + * Foundation, either version 2 of that License or (at your option) any
617 + * later version.
618 + *
619 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
620 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
621 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
622 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
623 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
624 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
625 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
626 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
627 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
628 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
629 + */
630 +
631 +#include <linux/module.h>
632 +#include <linux/debugfs.h>
633 +#include "dpaa2-eth.h"
634 +#include "dpaa2-eth-debugfs.h"
635 +
636 +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
637 +
638 +static struct dentry *dpaa2_dbg_root;
639 +
640 +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
641 +{
642 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
643 + struct rtnl_link_stats64 *stats;
644 + struct dpaa2_eth_drv_stats *extras;
645 + int i;
646 +
647 + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
648 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
649 + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
650 + "Tx SG", "Enq busy");
651 +
652 + for_each_online_cpu(i) {
653 + stats = per_cpu_ptr(priv->percpu_stats, i);
654 + extras = per_cpu_ptr(priv->percpu_extras, i);
655 + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
656 + i,
657 + stats->rx_packets,
658 + stats->rx_errors,
659 + extras->rx_sg_frames,
660 + stats->tx_packets,
661 + stats->tx_errors,
662 + extras->tx_conf_frames,
663 + extras->tx_sg_frames,
664 + extras->tx_portal_busy);
665 + }
666 +
667 + return 0;
668 +}
669 +
670 +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
671 +{
672 + int err;
673 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
674 +
675 + err = single_open(file, dpaa2_dbg_cpu_show, priv);
676 + if (err < 0)
677 + netdev_err(priv->net_dev, "single_open() failed\n");
678 +
679 + return err;
680 +}
681 +
682 +static const struct file_operations dpaa2_dbg_cpu_ops = {
683 + .open = dpaa2_dbg_cpu_open,
684 + .read = seq_read,
685 + .llseek = seq_lseek,
686 + .release = single_release,
687 +};
688 +
689 +static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
690 +{
691 + switch (fq->type) {
692 + case DPAA2_RX_FQ:
693 + return "Rx";
694 + case DPAA2_TX_CONF_FQ:
695 + return "Tx conf";
696 + case DPAA2_RX_ERR_FQ:
697 + return "Rx err";
698 + default:
699 + return "N/A";
700 + }
701 +}
702 +
703 +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
704 +{
705 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
706 + struct dpaa2_eth_fq *fq;
707 + u32 fcnt, bcnt;
708 + int i, err;
709 +
710 + seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
711 + seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
712 + "VFQID", "CPU", "Type", "Frames", "Pending frames",
713 + "Congestion");
714 +
715 + for (i = 0; i < priv->num_fqs; i++) {
716 + fq = &priv->fq[i];
717 + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
718 + if (err)
719 + fcnt = 0;
720 +
721 + seq_printf(file, "%5d%16d%16s%16llu%16u%16llu\n",
722 + fq->fqid,
723 + fq->target_cpu,
724 + fq_type_to_str(fq),
725 + fq->stats.frames,
726 + fcnt,
727 + fq->stats.congestion_entry);
728 + }
729 +
730 + return 0;
731 +}
732 +
733 +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
734 +{
735 + int err;
736 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
737 +
738 + err = single_open(file, dpaa2_dbg_fqs_show, priv);
739 + if (err < 0)
740 + netdev_err(priv->net_dev, "single_open() failed\n");
741 +
742 + return err;
743 +}
744 +
745 +static const struct file_operations dpaa2_dbg_fq_ops = {
746 + .open = dpaa2_dbg_fqs_open,
747 + .read = seq_read,
748 + .llseek = seq_lseek,
749 + .release = single_release,
750 +};
751 +
752 +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
753 +{
754 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
755 + struct dpaa2_eth_channel *ch;
756 + int i;
757 +
758 + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
759 + seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
760 + "CHID", "CPU", "Deq busy", "Frames", "CDANs",
761 + "Avg frm/CDAN");
762 +
763 + for (i = 0; i < priv->num_channels; i++) {
764 + ch = priv->channel[i];
765 + seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n",
766 + ch->ch_id,
767 + ch->nctx.desired_cpu,
768 + ch->stats.dequeue_portal_busy,
769 + ch->stats.frames,
770 + ch->stats.cdan,
771 + ch->stats.frames / ch->stats.cdan);
772 + }
773 +
774 + return 0;
775 +}
776 +
777 +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
778 +{
779 + int err;
780 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
781 +
782 + err = single_open(file, dpaa2_dbg_ch_show, priv);
783 + if (err < 0)
784 + netdev_err(priv->net_dev, "single_open() failed\n");
785 +
786 + return err;
787 +}
788 +
789 +static const struct file_operations dpaa2_dbg_ch_ops = {
790 + .open = dpaa2_dbg_ch_open,
791 + .read = seq_read,
792 + .llseek = seq_lseek,
793 + .release = single_release,
794 +};
795 +
796 +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
797 + size_t count, loff_t *offset)
798 +{
799 + struct dpaa2_eth_priv *priv = file->private_data;
800 + struct rtnl_link_stats64 *percpu_stats;
801 + struct dpaa2_eth_drv_stats *percpu_extras;
802 + struct dpaa2_eth_fq *fq;
803 + struct dpaa2_eth_channel *ch;
804 + int i;
805 +
806 + for_each_online_cpu(i) {
807 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
808 + memset(percpu_stats, 0, sizeof(*percpu_stats));
809 +
810 + percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
811 + memset(percpu_extras, 0, sizeof(*percpu_extras));
812 + }
813 +
814 + for (i = 0; i < priv->num_fqs; i++) {
815 + fq = &priv->fq[i];
816 + memset(&fq->stats, 0, sizeof(fq->stats));
817 + }
818 +
819 + for (i = 0; i < priv->num_channels; i++) {
820 + ch = priv->channel[i];
821 + memset(&ch->stats, 0, sizeof(ch->stats));
822 + }
823 +
824 + return count;
825 +}
826 +
827 +static const struct file_operations dpaa2_dbg_reset_ops = {
828 + .open = simple_open,
829 + .write = dpaa2_dbg_reset_write,
830 +};
831 +
832 +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
833 + const char __user *buf,
834 + size_t count, loff_t *offset)
835 +{
836 + struct dpaa2_eth_priv *priv = file->private_data;
837 + int err;
838 +
839 + err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
840 + if (err)
841 + netdev_err(priv->net_dev,
842 + "dpni_reset_statistics() failed %d\n", err);
843 +
844 + return count;
845 +}
846 +
847 +static const struct file_operations dpaa2_dbg_reset_mc_ops = {
848 + .open = simple_open,
849 + .write = dpaa2_dbg_reset_mc_write,
850 +};
851 +
852 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
853 +{
854 + if (!dpaa2_dbg_root)
855 + return;
856 +
857 + /* Create a directory for the interface */
858 + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
859 + dpaa2_dbg_root);
860 + if (!priv->dbg.dir) {
861 + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
862 + return;
863 + }
864 +
865 + /* per-cpu stats file */
866 + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
867 + priv->dbg.dir, priv,
868 + &dpaa2_dbg_cpu_ops);
869 + if (!priv->dbg.cpu_stats) {
870 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
871 + goto err_cpu_stats;
872 + }
873 +
874 + /* per-fq stats file */
875 + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
876 + priv->dbg.dir, priv,
877 + &dpaa2_dbg_fq_ops);
878 + if (!priv->dbg.fq_stats) {
879 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
880 + goto err_fq_stats;
881 + }
882 +
883 +	/* per-channel stats file */
884 + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
885 + priv->dbg.dir, priv,
886 + &dpaa2_dbg_ch_ops);
887 +	if (!priv->dbg.ch_stats) {
888 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
889 + goto err_ch_stats;
890 + }
891 +
892 + /* reset stats */
893 + priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
894 + priv->dbg.dir, priv,
895 + &dpaa2_dbg_reset_ops);
896 + if (!priv->dbg.reset_stats) {
897 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
898 + goto err_reset_stats;
899 + }
900 +
901 + /* reset MC stats */
902 + priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
903 + 0222, priv->dbg.dir, priv,
904 + &dpaa2_dbg_reset_mc_ops);
905 + if (!priv->dbg.reset_mc_stats) {
906 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
907 + goto err_reset_mc_stats;
908 + }
909 +
910 + return;
911 +
912 +err_reset_mc_stats:
913 + debugfs_remove(priv->dbg.reset_stats);
914 +err_reset_stats:
915 + debugfs_remove(priv->dbg.ch_stats);
916 +err_ch_stats:
917 + debugfs_remove(priv->dbg.fq_stats);
918 +err_fq_stats:
919 + debugfs_remove(priv->dbg.cpu_stats);
920 +err_cpu_stats:
921 + debugfs_remove(priv->dbg.dir);
922 +}
923 +
924 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
925 +{
926 + debugfs_remove(priv->dbg.reset_mc_stats);
927 + debugfs_remove(priv->dbg.reset_stats);
928 + debugfs_remove(priv->dbg.fq_stats);
929 + debugfs_remove(priv->dbg.ch_stats);
930 + debugfs_remove(priv->dbg.cpu_stats);
931 + debugfs_remove(priv->dbg.dir);
932 +}
933 +
934 +void dpaa2_eth_dbg_init(void)
935 +{
936 + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
937 + if (!dpaa2_dbg_root) {
938 + pr_err("DPAA2-ETH: debugfs create failed\n");
939 + return;
940 + }
941 +
942 + pr_info("DPAA2-ETH: debugfs created\n");
943 +}
944 +
945 +void __exit dpaa2_eth_dbg_exit(void)
946 +{
947 + debugfs_remove(dpaa2_dbg_root);
948 +}
949 --- /dev/null
950 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
951 @@ -0,0 +1,60 @@
952 +/* Copyright 2015 Freescale Semiconductor Inc.
953 + *
954 + * Redistribution and use in source and binary forms, with or without
955 + * modification, are permitted provided that the following conditions are met:
956 + * * Redistributions of source code must retain the above copyright
957 + * notice, this list of conditions and the following disclaimer.
958 + * * Redistributions in binary form must reproduce the above copyright
959 + * notice, this list of conditions and the following disclaimer in the
960 + * documentation and/or other materials provided with the distribution.
961 + * * Neither the name of Freescale Semiconductor nor the
962 + * names of its contributors may be used to endorse or promote products
963 + * derived from this software without specific prior written permission.
964 + *
965 + *
966 + * ALTERNATIVELY, this software may be distributed under the terms of the
967 + * GNU General Public License ("GPL") as published by the Free Software
968 + * Foundation, either version 2 of that License or (at your option) any
969 + * later version.
970 + *
971 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
972 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
973 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
974 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
975 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
976 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
977 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
978 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
979 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
980 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
981 + */
982 +
983 +#ifndef DPAA2_ETH_DEBUGFS_H
984 +#define DPAA2_ETH_DEBUGFS_H
985 +
986 +#include <linux/dcache.h>
987 +
988 +struct dpaa2_eth_priv;
989 +
990 +struct dpaa2_debugfs {
991 + struct dentry *dir;
992 + struct dentry *fq_stats;
993 + struct dentry *ch_stats;
994 + struct dentry *cpu_stats;
995 + struct dentry *reset_stats;
996 + struct dentry *reset_mc_stats;
997 +};
998 +
999 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
1000 +void dpaa2_eth_dbg_init(void);
1001 +void dpaa2_eth_dbg_exit(void);
1002 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
1003 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
1004 +#else
1005 +static inline void dpaa2_eth_dbg_init(void) {}
1006 +static inline void dpaa2_eth_dbg_exit(void) {}
1007 +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
1008 +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
1009 +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
1010 +
1011 +#endif /* DPAA2_ETH_DEBUGFS_H */
1012 --- /dev/null
1013 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1014 @@ -0,0 +1,184 @@
1015 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
1016 + *
1017 + * Redistribution and use in source and binary forms, with or without
1018 + * modification, are permitted provided that the following conditions are met:
1019 + * * Redistributions of source code must retain the above copyright
1020 + * notice, this list of conditions and the following disclaimer.
1021 + * * Redistributions in binary form must reproduce the above copyright
1022 + * notice, this list of conditions and the following disclaimer in the
1023 + * documentation and/or other materials provided with the distribution.
1024 + * * Neither the name of Freescale Semiconductor nor the
1025 + * names of its contributors may be used to endorse or promote products
1026 + * derived from this software without specific prior written permission.
1027 + *
1028 + *
1029 + * ALTERNATIVELY, this software may be distributed under the terms of the
1030 + * GNU General Public License ("GPL") as published by the Free Software
1031 + * Foundation, either version 2 of that License or (at your option) any
1032 + * later version.
1033 + *
1034 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1035 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1036 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1037 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1038 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1039 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1040 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1041 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1042 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1043 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1044 + */
1045 +
1046 +#undef TRACE_SYSTEM
1047 +#define TRACE_SYSTEM dpaa2_eth
1048 +
1049 +#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
1050 +#define _DPAA2_ETH_TRACE_H
1051 +
1052 +#include <linux/skbuff.h>
1053 +#include <linux/netdevice.h>
1054 +#include <linux/tracepoint.h>
1055 +
1056 +#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
1057 +/* trace_printk format for raw buffer event class */
1058 +#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
1059 +
1060 +/* This is used to declare a class of events.
1061 + * individual events of this type will be defined below.
1062 + */
1063 +
1064 +/* Store details about a frame descriptor */
1065 +DECLARE_EVENT_CLASS(dpaa2_eth_fd,
1066 + /* Trace function prototype */
1067 + TP_PROTO(struct net_device *netdev,
1068 + const struct dpaa2_fd *fd),
1069 +
1070 + /* Repeat argument list here */
1071 + TP_ARGS(netdev, fd),
1072 +
1073 + /* A structure containing the relevant information we want
1074 + * to record. Declare name and type for each normal element,
1075 + * name, type and size for arrays. Use __string for variable
1076 + * length strings.
1077 + */
1078 + TP_STRUCT__entry(
1079 + __field(u64, fd_addr)
1080 + __field(u32, fd_len)
1081 + __field(u16, fd_offset)
1082 + __string(name, netdev->name)
1083 + ),
1084 +
1085 + /* The function that assigns values to the above declared
1086 + * fields
1087 + */
1088 + TP_fast_assign(
1089 + __entry->fd_addr = dpaa2_fd_get_addr(fd);
1090 + __entry->fd_len = dpaa2_fd_get_len(fd);
1091 + __entry->fd_offset = dpaa2_fd_get_offset(fd);
1092 + __assign_str(name, netdev->name);
1093 + ),
1094 +
1095 + /* This is what gets printed when the trace event is
1096 + * triggered.
1097 + */
1098 + TP_printk(TR_FMT,
1099 + __get_str(name),
1100 + __entry->fd_addr,
1101 + __entry->fd_len,
1102 + __entry->fd_offset)
1103 +);
1104 +
1105 +/* Now declare events of the above type. Format is:
1106 + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
1107 + */
1108 +
1109 +/* Tx (egress) fd */
1110 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
1111 + TP_PROTO(struct net_device *netdev,
1112 + const struct dpaa2_fd *fd),
1113 +
1114 + TP_ARGS(netdev, fd)
1115 +);
1116 +
1117 +/* Rx fd */
1118 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
1119 + TP_PROTO(struct net_device *netdev,
1120 + const struct dpaa2_fd *fd),
1121 +
1122 + TP_ARGS(netdev, fd)
1123 +);
1124 +
1125 +/* Tx confirmation fd */
1126 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
1127 + TP_PROTO(struct net_device *netdev,
1128 + const struct dpaa2_fd *fd),
1129 +
1130 + TP_ARGS(netdev, fd)
1131 +);
1132 +
1133 +/* Log data about raw buffers. Useful for tracing DPBP content. */
1134 +TRACE_EVENT(dpaa2_eth_buf_seed,
1135 + /* Trace function prototype */
1136 + TP_PROTO(struct net_device *netdev,
1137 + /* virtual address and size */
1138 + void *vaddr,
1139 + size_t size,
1140 + /* dma map address and size */
1141 + dma_addr_t dma_addr,
1142 + size_t map_size,
1143 + /* buffer pool id, if relevant */
1144 + u16 bpid),
1145 +
1146 + /* Repeat argument list here */
1147 + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
1148 +
1149 + /* A structure containing the relevant information we want
1150 + * to record. Declare name and type for each normal element,
1151 + * name, type and size for arrays. Use __string for variable
1152 + * length strings.
1153 + */
1154 + TP_STRUCT__entry(
1155 + __field(void *, vaddr)
1156 + __field(size_t, size)
1157 + __field(dma_addr_t, dma_addr)
1158 + __field(size_t, map_size)
1159 + __field(u16, bpid)
1160 + __string(name, netdev->name)
1161 + ),
1162 +
1163 + /* The function that assigns values to the above declared
1164 + * fields
1165 + */
1166 + TP_fast_assign(
1167 + __entry->vaddr = vaddr;
1168 + __entry->size = size;
1169 + __entry->dma_addr = dma_addr;
1170 + __entry->map_size = map_size;
1171 + __entry->bpid = bpid;
1172 + __assign_str(name, netdev->name);
1173 + ),
1174 +
1175 + /* This is what gets printed when the trace event is
1176 + * triggered.
1177 + */
1178 + TP_printk(TR_BUF_FMT,
1179 + __get_str(name),
1180 + __entry->vaddr,
1181 + __entry->size,
1182 + &__entry->dma_addr,
1183 + __entry->map_size,
1184 + __entry->bpid)
1185 +);
1186 +
1187 +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
1188 + * The syntax is the same as for DECLARE_EVENT_CLASS().
1189 + */
1190 +
1191 +#endif /* _DPAA2_ETH_TRACE_H */
1192 +
1193 +/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
1194 +#undef TRACE_INCLUDE_PATH
1195 +#define TRACE_INCLUDE_PATH .
1196 +#undef TRACE_INCLUDE_FILE
1197 +#define TRACE_INCLUDE_FILE dpaa2-eth-trace
1198 +#include <trace/define_trace.h>
1199 --- /dev/null
1200 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1201 @@ -0,0 +1,3155 @@
1202 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
1203 + *
1204 + * Redistribution and use in source and binary forms, with or without
1205 + * modification, are permitted provided that the following conditions are met:
1206 + * * Redistributions of source code must retain the above copyright
1207 + * notice, this list of conditions and the following disclaimer.
1208 + * * Redistributions in binary form must reproduce the above copyright
1209 + * notice, this list of conditions and the following disclaimer in the
1210 + * documentation and/or other materials provided with the distribution.
1211 + * * Neither the name of Freescale Semiconductor nor the
1212 + * names of its contributors may be used to endorse or promote products
1213 + * derived from this software without specific prior written permission.
1214 + *
1215 + *
1216 + * ALTERNATIVELY, this software may be distributed under the terms of the
1217 + * GNU General Public License ("GPL") as published by the Free Software
1218 + * Foundation, either version 2 of that License or (at your option) any
1219 + * later version.
1220 + *
1221 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1222 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1223 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1224 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1225 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1226 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1227 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1228 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1229 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1230 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1231 + */
1232 +#include <linux/init.h>
1233 +#include <linux/module.h>
1234 +#include <linux/platform_device.h>
1235 +#include <linux/etherdevice.h>
1236 +#include <linux/of_net.h>
1237 +#include <linux/interrupt.h>
1238 +#include <linux/debugfs.h>
1239 +#include <linux/kthread.h>
1240 +#include <linux/msi.h>
1241 +#include <linux/net_tstamp.h>
1242 +#include <linux/iommu.h>
1243 +
1244 +#include "../../fsl-mc/include/dpbp.h"
1245 +#include "../../fsl-mc/include/dpcon.h"
1246 +#include "../../fsl-mc/include/mc.h"
1247 +#include "../../fsl-mc/include/mc-sys.h"
1248 +#include "dpaa2-eth.h"
1249 +#include "dpkg.h"
1250 +
1251 +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
1252 + * using these trace events only need to #include the trace header.
1253 + */
1254 +#define CREATE_TRACE_POINTS
1255 +#include "dpaa2-eth-trace.h"
1256 +
1257 +MODULE_LICENSE("Dual BSD/GPL");
1258 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
1259 +MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
1260 +
1261 +const char dpaa2_eth_drv_version[] = "0.1";
1262 +
1263 +void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr)
1264 +{
1265 + phys_addr_t phys_addr;
1266 +
1267 + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
1268 +
1269 + return phys_to_virt(phys_addr);
1270 +}
1271 +
1272 +static void validate_rx_csum(struct dpaa2_eth_priv *priv,
1273 + u32 fd_status,
1274 + struct sk_buff *skb)
1275 +{
1276 + skb_checksum_none_assert(skb);
1277 +
1278 + /* HW checksum validation is disabled, nothing to do here */
1279 + if (!(priv->net_dev->features & NETIF_F_RXCSUM))
1280 + return;
1281 +
1282 + /* Read checksum validation bits */
1283 + if (!((fd_status & DPAA2_FAS_L3CV) &&
1284 + (fd_status & DPAA2_FAS_L4CV)))
1285 + return;
1286 +
1287 + /* Inform the stack there's no need to compute L3/L4 csum anymore */
1288 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1289 +}
1290 +
1291 +/* Free a received FD.
1292 + * Not to be used for Tx conf FDs or on any other paths.
1293 + */
1294 +static void free_rx_fd(struct dpaa2_eth_priv *priv,
1295 + const struct dpaa2_fd *fd,
1296 + void *vaddr)
1297 +{
1298 + struct device *dev = priv->net_dev->dev.parent;
1299 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1300 + u8 fd_format = dpaa2_fd_get_format(fd);
1301 + struct dpaa2_sg_entry *sgt;
1302 + void *sg_vaddr;
1303 + int i;
1304 +
1305 + /* If single buffer frame, just free the data buffer */
1306 + if (fd_format == dpaa2_fd_single)
1307 + goto free_buf;
1308 + else if (fd_format != dpaa2_fd_sg)
1309 + /* we don't support any other format */
1310 + return;
1311 +
1312 + /* For S/G frames, we first need to free all SG entries */
1313 + sgt = vaddr + dpaa2_fd_get_offset(fd);
1314 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1315 + addr = dpaa2_sg_get_addr(&sgt[i]);
1316 + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1317 +
1318 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1319 + DMA_FROM_DEVICE);
1320 +
1321 + put_page(virt_to_head_page(sg_vaddr));
1322 +
1323 + if (dpaa2_sg_is_final(&sgt[i]))
1324 + break;
1325 + }
1326 +
1327 +free_buf:
1328 + put_page(virt_to_head_page(vaddr));
1329 +}
1330 +
1331 +/* Build a linear skb based on a single-buffer frame descriptor */
1332 +static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
1333 + struct dpaa2_eth_channel *ch,
1334 + const struct dpaa2_fd *fd,
1335 + void *fd_vaddr)
1336 +{
1337 + struct sk_buff *skb = NULL;
1338 + u16 fd_offset = dpaa2_fd_get_offset(fd);
1339 + u32 fd_length = dpaa2_fd_get_len(fd);
1340 +
1341 + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
1342 + if (unlikely(!skb))
1343 + return NULL;
1344 +
1345 + skb_reserve(skb, fd_offset);
1346 + skb_put(skb, fd_length);
1347 +
1348 + ch->buf_count--;
1349 +
1350 + return skb;
1351 +}
1352 +
1353 +/* Build a non linear (fragmented) skb based on a S/G table */
1354 +static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
1355 + struct dpaa2_eth_channel *ch,
1356 + struct dpaa2_sg_entry *sgt)
1357 +{
1358 + struct sk_buff *skb = NULL;
1359 + struct device *dev = priv->net_dev->dev.parent;
1360 + void *sg_vaddr;
1361 + dma_addr_t sg_addr;
1362 + u16 sg_offset;
1363 + u32 sg_length;
1364 + struct page *page, *head_page;
1365 + int page_offset;
1366 + int i;
1367 +
1368 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1369 + struct dpaa2_sg_entry *sge = &sgt[i];
1370 +
1371 + /* NOTE: We only support SG entries in dpaa2_sg_single format,
1372 + * but this is the only format we may receive from HW anyway
1373 + */
1374 +
1375 + /* Get the address and length from the S/G entry */
1376 + sg_addr = dpaa2_sg_get_addr(sge);
1377 + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr);
1378 + dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
1379 + DMA_FROM_DEVICE);
1380 +
1381 + sg_length = dpaa2_sg_get_len(sge);
1382 +
1383 + if (i == 0) {
1384 + /* We build the skb around the first data buffer */
1385 + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
1386 + if (unlikely(!skb))
1387 + return NULL;
1388 +
1389 + sg_offset = dpaa2_sg_get_offset(sge);
1390 + skb_reserve(skb, sg_offset);
1391 + skb_put(skb, sg_length);
1392 + } else {
1393 + /* Rest of the data buffers are stored as skb frags */
1394 + page = virt_to_page(sg_vaddr);
1395 + head_page = virt_to_head_page(sg_vaddr);
1396 +
1397 + /* Offset in page (which may be compound).
1398 + * Data in subsequent SG entries is stored from the
1399 + * beginning of the buffer, so we don't need to add the
1400 + * sg_offset.
1401 + */
1402 + page_offset = ((unsigned long)sg_vaddr &
1403 + (PAGE_SIZE - 1)) +
1404 + (page_address(page) - page_address(head_page));
1405 +
1406 + skb_add_rx_frag(skb, i - 1, head_page, page_offset,
1407 + sg_length, DPAA2_ETH_RX_BUF_SIZE);
1408 + }
1409 +
1410 + if (dpaa2_sg_is_final(sge))
1411 + break;
1412 + }
1413 +
1414 + /* Count all data buffers + SG table buffer */
1415 + ch->buf_count -= i + 2;
1416 +
1417 + return skb;
1418 +}
1419 +
1420 +/* Main Rx frame processing routine */
1421 +static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
1422 + struct dpaa2_eth_channel *ch,
1423 + const struct dpaa2_fd *fd,
1424 + struct napi_struct *napi,
1425 + u16 queue_id)
1426 +{
1427 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1428 + u8 fd_format = dpaa2_fd_get_format(fd);
1429 + void *vaddr;
1430 + struct sk_buff *skb;
1431 + struct rtnl_link_stats64 *percpu_stats;
1432 + struct dpaa2_eth_drv_stats *percpu_extras;
1433 + struct device *dev = priv->net_dev->dev.parent;
1434 + struct dpaa2_fas *fas;
1435 + void *buf_data;
1436 + u32 status = 0;
1437 +
1438 + /* Tracing point */
1439 + trace_dpaa2_rx_fd(priv->net_dev, fd);
1440 +
1441 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1442 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
1443 +
1444 + /* HWA - FAS, timestamp */
1445 + fas = dpaa2_eth_get_fas(vaddr);
1446 + prefetch(fas);
1447 + /* data / SG table */
1448 + buf_data = vaddr + dpaa2_fd_get_offset(fd);
1449 + prefetch(buf_data);
1450 +
1451 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1452 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
1453 +
1454 + switch (fd_format) {
1455 + case dpaa2_fd_single:
1456 + skb = build_linear_skb(priv, ch, fd, vaddr);
1457 + break;
1458 + case dpaa2_fd_sg:
1459 + skb = build_frag_skb(priv, ch, buf_data);
1460 + put_page(virt_to_head_page(vaddr));
1461 + percpu_extras->rx_sg_frames++;
1462 + percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
1463 + break;
1464 + default:
1465 + /* We don't support any other format */
1466 + goto err_frame_format;
1467 + }
1468 +
1469 + if (unlikely(!skb))
1470 + goto err_build_skb;
1471 +
1472 + prefetch(skb->data);
1473 +
1474 + /* Get the timestamp value */
1475 + if (priv->ts_rx_en) {
1476 + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
1477 + u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr);
1478 +
1479 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
1480 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
1481 + shhwtstamps->hwtstamp = ns_to_ktime(*ns);
1482 + }
1483 +
1484 + /* Check if we need to validate the L4 csum */
1485 + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
1486 + status = le32_to_cpu(fas->status);
1487 + validate_rx_csum(priv, status, skb);
1488 + }
1489 +
1490 + skb->protocol = eth_type_trans(skb, priv->net_dev);
1491 +
1492 + /* Record Rx queue - this will be used when picking a Tx queue to
1493 + * forward the frames. We're keeping flow affinity through the
1494 + * network stack.
1495 + */
1496 + skb_record_rx_queue(skb, queue_id);
1497 +
1498 + percpu_stats->rx_packets++;
1499 + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
1500 +
1501 + napi_gro_receive(napi, skb);
1502 +
1503 + return;
1504 +
1505 +err_build_skb:
1506 + free_rx_fd(priv, fd, vaddr);
1507 +err_frame_format:
1508 + percpu_stats->rx_dropped++;
1509 +}
1510 +
1511 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
1512 +/* Processing of Rx frames received on the error FQ
1513 + * We check and print the error bits and then free the frame
1514 + */
1515 +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
1516 + struct dpaa2_eth_channel *ch,
1517 + const struct dpaa2_fd *fd,
1518 + struct napi_struct *napi __always_unused,
1519 + u16 queue_id __always_unused)
1520 +{
1521 + struct device *dev = priv->net_dev->dev.parent;
1522 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1523 + void *vaddr;
1524 + struct rtnl_link_stats64 *percpu_stats;
1525 + struct dpaa2_fas *fas;
1526 + u32 status = 0;
1527 + bool check_fas_errors = false;
1528 +
1529 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1530 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
1531 +
1532 + /* check frame errors in the FD field */
1533 + if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) {
1534 + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
1535 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
1536 + if (net_ratelimit())
1537 +			netdev_dbg(priv->net_dev, "Rx frame FD err: 0x%08x\n",
1538 + fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK);
1539 + }
1540 +
1541 + /* check frame errors in the FAS field */
1542 + if (check_fas_errors) {
1543 + fas = dpaa2_eth_get_fas(vaddr);
1544 + status = le32_to_cpu(fas->status);
1545 + if (net_ratelimit())
1546 + netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
1547 + status & DPAA2_FAS_RX_ERR_MASK);
1548 + }
1549 + free_rx_fd(priv, fd, vaddr);
1550 +
1551 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1552 + percpu_stats->rx_errors++;
1553 +}
1554 +#endif
1555 +
1556 +/* Consume all frames pull-dequeued into the store. This is the simplest way to
1557 + * make sure we don't accidentally issue another volatile dequeue which would
1558 + * overwrite (leak) frames already in the store.
1559 + *
1560 + * The number of frames is returned using the last 2 output arguments,
1561 + * separately for Rx and Tx confirmations.
1562 + *
1563 + * Observance of NAPI budget is not our concern, leaving that to the caller.
1564 + */
1565 +static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
1566 + int *tx_conf_cleaned)
1567 +{
1568 + struct dpaa2_eth_priv *priv = ch->priv;
1569 + struct dpaa2_eth_fq *fq = NULL;
1570 + struct dpaa2_dq *dq;
1571 + const struct dpaa2_fd *fd;
1572 + int cleaned = 0;
1573 + int is_last;
1574 +
1575 + do {
1576 + dq = dpaa2_io_store_next(ch->store, &is_last);
1577 + if (unlikely(!dq)) {
1578 + /* If we're here, we *must* have placed a
1579 +			 * volatile dequeue command, so keep reading through
1580 + * the store until we get some sort of valid response
1581 + * token (either a valid frame or an "empty dequeue")
1582 + */
1583 + continue;
1584 + }
1585 +
1586 + fd = dpaa2_dq_fd(dq);
1587 +
1588 + /* prefetch the frame descriptor */
1589 + prefetch(fd);
1590 +
1591 + fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
1592 + fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
1593 + cleaned++;
1594 + } while (!is_last);
1595 +
1596 + if (!cleaned)
1597 + return false;
1598 +
1599 + /* All frames brought in store by a volatile dequeue
1600 + * come from the same queue
1601 + */
1602 + if (fq->type == DPAA2_TX_CONF_FQ)
1603 + *tx_conf_cleaned += cleaned;
1604 + else
1605 + *rx_cleaned += cleaned;
1606 +
1607 + fq->stats.frames += cleaned;
1608 + ch->stats.frames += cleaned;
1609 +
1610 + return true;
1611 +}
1612 +
1613 +/* Configure the egress frame annotation for timestamp update */
1614 +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
1615 +{
1616 + struct dpaa2_faead *faead;
1617 + u32 ctrl;
1618 + u32 frc;
1619 +
1620 + /* Mark the egress frame annotation area as valid */
1621 + frc = dpaa2_fd_get_frc(fd);
1622 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
1623 +
1624 +	/* enable UPD (update prepended data) bit in FAEAD field of
1625 + * hardware frame annotation area
1626 + */
1627 + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
1628 + faead = dpaa2_eth_get_faead(buf_start);
1629 + faead->ctrl = cpu_to_le32(ctrl);
1630 +}
1631 +
1632 +/* Create a frame descriptor based on a fragmented skb */
1633 +static int build_sg_fd(struct dpaa2_eth_priv *priv,
1634 + struct sk_buff *skb,
1635 + struct dpaa2_fd *fd)
1636 +{
1637 + struct device *dev = priv->net_dev->dev.parent;
1638 + void *sgt_buf = NULL;
1639 + dma_addr_t addr;
1640 + int nr_frags = skb_shinfo(skb)->nr_frags;
1641 + struct dpaa2_sg_entry *sgt;
1642 + int i, err;
1643 + int sgt_buf_size;
1644 + struct scatterlist *scl, *crt_scl;
1645 + int num_sg;
1646 + int num_dma_bufs;
1647 + struct dpaa2_fas *fas;
1648 + struct dpaa2_eth_swa *swa;
1649 +
1650 + /* Create and map scatterlist.
1651 + * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
1652 + * to go beyond nr_frags+1.
1653 + * Note: We don't support chained scatterlists
1654 + */
1655 + if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
1656 + return -EINVAL;
1657 +
1658 + scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
1659 + if (unlikely(!scl))
1660 + return -ENOMEM;
1661 +
1662 + sg_init_table(scl, nr_frags + 1);
1663 + num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
1664 + num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1665 + if (unlikely(!num_dma_bufs)) {
1666 + err = -ENOMEM;
1667 + goto dma_map_sg_failed;
1668 + }
1669 +
1670 + /* Prepare the HW SGT structure */
1671 + sgt_buf_size = priv->tx_data_offset +
1672 + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
1673 + sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
1674 + if (unlikely(!sgt_buf)) {
1675 + err = -ENOMEM;
1676 + goto sgt_buf_alloc_failed;
1677 + }
1678 + sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
1679 +
1680 + /* PTA from egress side is passed as is to the confirmation side so
1681 + * we need to clear some fields here in order to find consistent values
1682 + * on TX confirmation. We are clearing FAS (Frame Annotation Status)
1683 + * field from the hardware annotation area
1684 + */
1685 + fas = dpaa2_eth_get_fas(sgt_buf);
1686 + memset(fas, 0, DPAA2_FAS_SIZE);
1687 +
1688 + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1689 +
1690 + /* Fill in the HW SGT structure.
1691 + *
1692 + * sgt_buf is zeroed out, so the following fields are implicit
1693 + * in all sgt entries:
1694 + * - offset is 0
1695 + * - format is 'dpaa2_sg_single'
1696 + */
1697 + for_each_sg(scl, crt_scl, num_dma_bufs, i) {
1698 + dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
1699 + dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
1700 + }
1701 + dpaa2_sg_set_final(&sgt[i - 1], true);
1702 +
1703 + /* Store the skb backpointer in the SGT buffer.
1704 + * Fit the scatterlist and the number of buffers alongside the
1705 + * skb backpointer in the software annotation area. We'll need
1706 + * all of them on Tx Conf.
1707 + */
1708 + swa = (struct dpaa2_eth_swa *)sgt_buf;
1709 + swa->skb = skb;
1710 + swa->scl = scl;
1711 + swa->num_sg = num_sg;
1712 + swa->num_dma_bufs = num_dma_bufs;
1713 +
1714 + /* Separately map the SGT buffer */
1715 + addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1716 + if (unlikely(dma_mapping_error(dev, addr))) {
1717 + err = -ENOMEM;
1718 + goto dma_map_single_failed;
1719 + }
1720 + dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1721 + dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1722 + dpaa2_fd_set_addr(fd, addr);
1723 + dpaa2_fd_set_len(fd, skb->len);
1724 +
1725 + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
1726 +
1727 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1728 + enable_tx_tstamp(fd, sgt_buf);
1729 +
1730 + return 0;
1731 +
1732 +dma_map_single_failed:
1733 + kfree(sgt_buf);
1734 +sgt_buf_alloc_failed:
1735 + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1736 +dma_map_sg_failed:
1737 + kfree(scl);
1738 + return err;
1739 +}
1740 +
1741 +/* Create a frame descriptor based on a linear skb */
1742 +static int build_single_fd(struct dpaa2_eth_priv *priv,
1743 + struct sk_buff *skb,
1744 + struct dpaa2_fd *fd)
1745 +{
1746 + struct device *dev = priv->net_dev->dev.parent;
1747 + u8 *buffer_start;
1748 + struct sk_buff **skbh;
1749 + dma_addr_t addr;
1750 + struct dpaa2_fas *fas;
1751 +
1752 + buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
1753 + DPAA2_ETH_TX_BUF_ALIGN,
1754 + DPAA2_ETH_TX_BUF_ALIGN);
1755 +
1756 + /* PTA from egress side is passed as is to the confirmation side so
1757 + * we need to clear some fields here in order to find consistent values
1758 + * on TX confirmation. We are clearing FAS (Frame Annotation Status)
1759 + * field from the hardware annotation area
1760 + */
1761 + fas = dpaa2_eth_get_fas(buffer_start);
1762 + memset(fas, 0, DPAA2_FAS_SIZE);
1763 +
1764 + /* Store a backpointer to the skb at the beginning of the buffer
1765 + * (in the private data area) such that we can release it
1766 + * on Tx confirm
1767 + */
1768 + skbh = (struct sk_buff **)buffer_start;
1769 + *skbh = skb;
1770 +
1771 + addr = dma_map_single(dev, buffer_start,
1772 + skb_tail_pointer(skb) - buffer_start,
1773 + DMA_BIDIRECTIONAL);
1774 + if (unlikely(dma_mapping_error(dev, addr)))
1775 + return -ENOMEM;
1776 +
1777 + dpaa2_fd_set_addr(fd, addr);
1778 + dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
1779 + dpaa2_fd_set_len(fd, skb->len);
1780 + dpaa2_fd_set_format(fd, dpaa2_fd_single);
1781 +
1782 + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
1783 +
1784 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1785 + enable_tx_tstamp(fd, buffer_start);
1786 +
1787 + return 0;
1788 +}
1789 +
1790 +/* FD freeing routine on the Tx path
1791 + *
1792 + * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
1793 + * back-pointed to is also freed.
1794 + * This can be called either from dpaa2_eth_tx_conf() or on the error path of
1795 + * dpaa2_eth_tx().
1796 + * Optionally, return the frame annotation status word (FAS), which needs
1797 + * to be checked if we're on the confirmation path.
1798 + */
1799 +static void free_tx_fd(const struct dpaa2_eth_priv *priv,
1800 + const struct dpaa2_fd *fd,
1801 + u32 *status)
1802 +{
1803 + struct device *dev = priv->net_dev->dev.parent;
1804 + dma_addr_t fd_addr;
1805 + struct sk_buff **skbh, *skb;
1806 + unsigned char *buffer_start;
1807 + int unmap_size;
1808 + struct scatterlist *scl;
1809 + int num_sg, num_dma_bufs;
1810 + struct dpaa2_eth_swa *swa;
1811 + u8 fd_format = dpaa2_fd_get_format(fd);
1812 + struct dpaa2_fas *fas;
1813 +
1814 + fd_addr = dpaa2_fd_get_addr(fd);
1815 + skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr);
1816 +
1817 + /* HWA - FAS, timestamp (for Tx confirmation frames) */
1818 + fas = dpaa2_eth_get_fas(skbh);
1819 + prefetch(fas);
1820 +
1821 + switch (fd_format) {
1822 + case dpaa2_fd_single:
1823 + skb = *skbh;
1824 + buffer_start = (unsigned char *)skbh;
1825 + /* Accessing the skb buffer is safe before dma unmap, because
1826 + * we didn't map the actual skb shell.
1827 + */
1828 + dma_unmap_single(dev, fd_addr,
1829 + skb_tail_pointer(skb) - buffer_start,
1830 + DMA_BIDIRECTIONAL);
1831 + break;
1832 + case dpaa2_fd_sg:
1833 + swa = (struct dpaa2_eth_swa *)skbh;
1834 + skb = swa->skb;
1835 + scl = swa->scl;
1836 + num_sg = swa->num_sg;
1837 + num_dma_bufs = swa->num_dma_bufs;
1838 +
1839 + /* Unmap the scatterlist */
1840 + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1841 + kfree(scl);
1842 +
1843 + /* Unmap the SGT buffer */
1844 + unmap_size = priv->tx_data_offset +
1845 + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
1846 + dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
1847 + break;
1848 + default:
1849 + /* Unsupported format, mark it as errored and give up */
1850 + if (status)
1851 + *status = ~0;
1852 + return;
1853 + }
1854 +
1855 + /* Get the timestamp value */
1856 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1857 + struct skb_shared_hwtstamps shhwtstamps;
1858 + u64 *ns;
1859 +
1860 + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1861 +
1862 + ns = (u64 *)dpaa2_eth_get_ts(skbh);
1863 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
1864 + shhwtstamps.hwtstamp = ns_to_ktime(*ns);
1865 + skb_tstamp_tx(skb, &shhwtstamps);
1866 + }
1867 +
1868 + /* Read the status from the Frame Annotation after we unmap the first
1869 + * buffer but before we free it. The caller function is responsible
1870 + * for checking the status value.
1871 + */
1872 + if (status)
1873 + *status = le32_to_cpu(fas->status);
1874 +
1875 + /* Free SGT buffer kmalloc'ed on tx */
1876 + if (fd_format != dpaa2_fd_single)
1877 + kfree(skbh);
1878 +
1879 + /* Move on with skb release */
1880 + dev_kfree_skb(skb);
1881 +}
1882 +
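+/* Main egress routine (ndo_start_xmit): build a frame descriptor from the skb
+ * and enqueue it to hardware through the QBMan portal, accounting for Tx
+ * congestion state along the way.
+ */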
1883 +static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1884 +{
1885 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1886 + struct device *dev = net_dev->dev.parent;
1887 + struct dpaa2_fd fd;
1888 + struct rtnl_link_stats64 *percpu_stats;
1889 + struct dpaa2_eth_drv_stats *percpu_extras;
1890 + struct dpaa2_eth_fq *fq;
1891 + u16 queue_mapping = skb_get_queue_mapping(skb);
1892 + int err, i;
1893 +
1894 + /* If we're congested, stop this tx queue; transmission of the
1895 + * current skb happens regardless of congestion state
1896 + */
1897 + fq = &priv->fq[queue_mapping];
1898 +
1899 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
1900 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
1901 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
1902 + netif_stop_subqueue(net_dev, queue_mapping);
1903 + fq->stats.congestion_entry++;
1904 + }
1905 +
1906 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1907 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
1908 +
1909 + if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
1910 + struct sk_buff *ns;
1911 +
1912 + ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
1913 + if (unlikely(!ns)) {
1914 + percpu_stats->tx_dropped++;
1915 + goto err_alloc_headroom;
1916 + }
1917 + dev_kfree_skb(skb);
1918 + skb = ns;
1919 + }
1920 +
1921 + /* We'll be holding a back-reference to the skb until Tx Confirmation;
1922 + * we don't want that overwritten by a concurrent Tx with a cloned skb.
1923 + */
1924 + skb = skb_unshare(skb, GFP_ATOMIC);
1925 + if (unlikely(!skb)) {
1926 + /* skb_unshare() has already freed the skb */
1927 + percpu_stats->tx_dropped++;
1928 + return NETDEV_TX_OK;
1929 + }
1930 +
1931 + /* Setup the FD fields */
1932 + memset(&fd, 0, sizeof(fd));
1933 +
1934 + if (skb_is_nonlinear(skb)) {
1935 + err = build_sg_fd(priv, skb, &fd);
1936 + percpu_extras->tx_sg_frames++;
1937 + percpu_extras->tx_sg_bytes += skb->len;
1938 + } else {
1939 + err = build_single_fd(priv, skb, &fd);
1940 + }
1941 +
1942 + if (unlikely(err)) {
1943 + percpu_stats->tx_dropped++;
1944 + goto err_build_fd;
1945 + }
1946 +
1947 + /* Tracing point */
1948 + trace_dpaa2_tx_fd(net_dev, &fd);
1949 +
1950 + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1951 + err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
1952 + fq->tx_qdbin, &fd);
1953 + /* TODO: This doesn't work. Check on simulator.
1954 + * err = dpaa2_io_service_enqueue_fq(NULL,
1955 + * priv->fq[0].fqid_tx, &fd);
1956 + */
1957 + if (err != -EBUSY)
1958 + break;
1959 + }
1960 + percpu_extras->tx_portal_busy += i;
1961 + if (unlikely(err < 0)) {
1962 + percpu_stats->tx_errors++;
1963 + /* Clean up everything, including freeing the skb */
1964 + free_tx_fd(priv, &fd, NULL);
1965 + } else {
1966 + percpu_stats->tx_packets++;
1967 + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
1968 + }
1969 +
1970 + return NETDEV_TX_OK;
1971 +
1972 +err_build_fd:
1973 +err_alloc_headroom:
1974 + dev_kfree_skb(skb);
1975 +
1976 + return NETDEV_TX_OK;
1977 +}
1978 +
1979 +/* Tx confirmation frame processing routine */
1980 +static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
1981 + struct dpaa2_eth_channel *ch,
1982 + const struct dpaa2_fd *fd,
1983 + struct napi_struct *napi __always_unused,
1984 + u16 queue_id)
1985 +{
1986 + struct device *dev = priv->net_dev->dev.parent;
1987 + struct rtnl_link_stats64 *percpu_stats;
1988 + struct dpaa2_eth_drv_stats *percpu_extras;
1989 + u32 status = 0;
1990 + bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
1991 + bool check_fas_errors = false;
1992 +
1993 + /* Tracing point */
1994 + trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
1995 +
1996 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
1997 + percpu_extras->tx_conf_frames++;
1998 + percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
1999 +
2000 + /* Check congestion state and wake all queues if necessary */
2001 + if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
2002 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
2003 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
2004 + if (!dpaa2_cscn_state_congested(priv->cscn_mem))
2005 + netif_tx_wake_all_queues(priv->net_dev);
2006 + }
2007 +
2008 + /* check frame errors in the FD field */
2009 + if (unlikely(errors)) {
2010 + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
2011 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2012 + if (net_ratelimit())
2013 +			netdev_dbg(priv->net_dev, "Tx frame FD err: 0x%08x\n",
2014 + fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
2015 + }
2016 +
2017 + free_tx_fd(priv, fd, check_fas_errors ? &status : NULL);
2018 +
2019 + /* if there are no errors, we're done */
2020 + if (likely(!errors))
2021 + return;
2022 +
2023 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2024 + /* Tx-conf logically pertains to the egress path. */
2025 + percpu_stats->tx_errors++;
2026 +
2027 + if (net_ratelimit())
2028 +		netdev_dbg(priv->net_dev, "Tx frame FAS err: 0x%08x\n",
2029 + status & DPAA2_FAS_TX_ERR_MASK);
2030 +}
2031 +
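+/* Enable/disable Rx L3 and L4 checksum validation offload on the DPNI */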
2032 +static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
2033 +{
2034 + int err;
2035 +
2036 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2037 + DPNI_OFF_RX_L3_CSUM, enable);
2038 + if (err) {
2039 + netdev_err(priv->net_dev,
2040 + "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
2041 + return err;
2042 + }
2043 +
2044 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2045 + DPNI_OFF_RX_L4_CSUM, enable);
2046 + if (err) {
2047 + netdev_err(priv->net_dev,
2048 + "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
2049 + return err;
2050 + }
2051 +
2052 + return 0;
2053 +}
2054 +
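+/* Enable/disable Tx L3 and L4 checksum generation offload on the DPNI */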
2055 +static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
2056 +{
2057 + int err;
2058 +
2059 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2060 + DPNI_OFF_TX_L3_CSUM, enable);
2061 + if (err) {
2062 + netdev_err(priv->net_dev,
2063 +			   "dpni_set_offload() DPNI_OFF_TX_L3_CSUM failed\n");
2064 + return err;
2065 + }
2066 +
2067 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2068 + DPNI_OFF_TX_L4_CSUM, enable);
2069 + if (err) {
2070 + netdev_err(priv->net_dev,
2071 +			   "dpni_set_offload() DPNI_OFF_TX_L4_CSUM failed\n");
2072 + return err;
2073 + }
2074 +
2075 + return 0;
2076 +}
2077 +
2078 +/* Perform a single release command to add buffers
2079 + * to the specified buffer pool
2080 + */
2081 +static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
2082 +{
2083 + struct device *dev = priv->net_dev->dev.parent;
2084 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2085 + void *buf;
2086 + dma_addr_t addr;
2087 + int i;
2088 +
2089 + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
2090 + /* Allocate buffer visible to WRIOP + skb shared info +
2091 + * alignment padding.
2092 + */
2093 + buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv));
2094 + if (unlikely(!buf))
2095 + goto err_alloc;
2096 +
2097 + buf = PTR_ALIGN(buf, priv->rx_buf_align);
2098 +
2099 + addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
2100 + DMA_FROM_DEVICE);
2101 + if (unlikely(dma_mapping_error(dev, addr)))
2102 + goto err_map;
2103 +
2104 + buf_array[i] = addr;
2105 +
2106 + /* tracing point */
2107 + trace_dpaa2_eth_buf_seed(priv->net_dev,
2108 + buf, DPAA2_ETH_BUF_RAW_SIZE(priv),
2109 + addr, DPAA2_ETH_RX_BUF_SIZE,
2110 + bpid);
2111 + }
2112 +
2113 +release_bufs:
2114 + /* In case the portal is busy, retry until successful.
2115 + * The buffer release function would only fail if the QBMan portal
2116 + * was busy, which implies portal contention (i.e. more CPUs than
2117 + * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
2118 + * there is little we can realistically do, short of giving up -
2119 + * in which case we'd risk depleting the buffer pool and never again
2120 + * receiving the Rx interrupt which would kick-start the refill logic.
2121 + * So just keep retrying, at the risk of being moved to ksoftirqd.
2122 + */
2123 + while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
2124 + cpu_relax();
2125 + return i;
2126 +
2127 +err_map:
2128 + put_page(virt_to_head_page(buf));
2129 +err_alloc:
2130 + if (i)
2131 + goto release_bufs;
2132 +
2133 + return 0;
2134 +}
2135 +
2136 +static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
2137 +{
2138 + int i, j;
2139 + int new_count;
2140 +
2141 + /* This is the lazy seeding of Rx buffer pools.
2142 +	 * add_bufs() is also used on the Rx hotpath and calls
2143 +	 * napi_alloc_frag(), which in turn ends up calling this_cpu_ptr()
2144 +	 * and therefore mandates execution in atomic context.
2145 + * Rather than splitting up the code, do a one-off preempt disable.
2146 + */
2147 + preempt_disable();
2148 + for (j = 0; j < priv->num_channels; j++) {
2149 + priv->channel[j]->buf_count = 0;
2150 + for (i = 0; i < priv->num_bufs;
2151 + i += DPAA2_ETH_BUFS_PER_CMD) {
2152 + new_count = add_bufs(priv, bpid);
2153 + priv->channel[j]->buf_count += new_count;
2154 +
2155 + if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
2156 + preempt_enable();
2157 + return -ENOMEM;
2158 + }
2159 + }
2160 + }
2161 + preempt_enable();
2162 +
2163 + return 0;
2164 +}
2165 +
2166 +/**
2167 + * Drain the specified number of buffers from the DPNI's private buffer pool.
2168 + * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
2169 + */
2170 +static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
2171 +{
2172 + struct device *dev = priv->net_dev->dev.parent;
2173 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2174 + void *vaddr;
2175 + int ret, i;
2176 +
2177 + do {
2178 + ret = dpaa2_io_service_acquire(NULL, priv->bpid,
2179 + buf_array, count);
2180 + if (ret < 0) {
2181 + netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
2182 + return;
2183 + }
2184 + for (i = 0; i < ret; i++) {
2185 + /* Same logic as on regular Rx path */
2186 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain,
2187 + buf_array[i]);
2188 + dma_unmap_single(dev, buf_array[i],
2189 + DPAA2_ETH_RX_BUF_SIZE,
2190 + DMA_FROM_DEVICE);
2191 + put_page(virt_to_head_page(vaddr));
2192 + }
2193 + } while (ret);
2194 +}
2195 +
2196 +static void drain_pool(struct dpaa2_eth_priv *priv)
2197 +{
2198 + preempt_disable();
2199 + drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
2200 + drain_bufs(priv, 1);
2201 + preempt_enable();
2202 +}
2203 +
2204 +/* Function is called from softirq context only, so we don't need to guard
2205 + * the access to the percpu count
2206 + */
2207 +static int refill_pool(struct dpaa2_eth_priv *priv,
2208 + struct dpaa2_eth_channel *ch,
2209 + u16 bpid)
2210 +{
2211 + int new_count;
2212 +
2213 + if (likely(ch->buf_count >= priv->refill_thresh))
2214 + return 0;
2215 +
2216 + do {
2217 + new_count = add_bufs(priv, bpid);
2218 + if (unlikely(!new_count)) {
2219 + /* Out of memory; abort for now, we'll try later on */
2220 + break;
2221 + }
2222 + ch->buf_count += new_count;
2223 + } while (ch->buf_count < priv->num_bufs);
2224 +
2225 + if (unlikely(ch->buf_count < priv->num_bufs))
2226 + return -ENOMEM;
2227 +
2228 + return 0;
2229 +}
2230 +
2231 +static int pull_channel(struct dpaa2_eth_channel *ch)
2232 +{
2233 + int err;
2234 + int dequeues = -1;
2235 +
2236 + /* Retry while portal is busy */
2237 + do {
2238 + err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
2239 + dequeues++;
2240 + cpu_relax();
2241 + } while (err == -EBUSY);
2242 +
2243 + ch->stats.dequeue_portal_busy += dequeues;
2244 + if (unlikely(err))
2245 + ch->stats.pull_err++;
2246 +
2247 + return err;
2248 +}
2249 +
2250 +/* NAPI poll routine
2251 + *
2252 + * Frames are dequeued from the QMan channel associated with this NAPI context.
2253 + * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
2254 + * confirmation frames are limited by a threshold per NAPI poll cycle.
2255 + */
2256 +static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
2257 +{
2258 + struct dpaa2_eth_channel *ch;
2259 + int rx_cleaned = 0, tx_conf_cleaned = 0;
2260 + bool store_cleaned;
2261 + struct dpaa2_eth_priv *priv;
2262 + int err;
2263 +
2264 + ch = container_of(napi, struct dpaa2_eth_channel, napi);
2265 + priv = ch->priv;
2266 +
2267 + do {
2268 + err = pull_channel(ch);
2269 + if (unlikely(err))
2270 + break;
2271 +
2272 + /* Refill pool if appropriate */
2273 + refill_pool(priv, ch, priv->bpid);
2274 +
2275 + store_cleaned = consume_frames(ch, &rx_cleaned,
2276 + &tx_conf_cleaned);
2277 +
2278 + /* If we've either consumed the budget with Rx frames,
2279 + * or reached the Tx conf threshold, we're done.
2280 + */
2281 + if (rx_cleaned >= budget ||
2282 + tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
2283 + return budget;
2284 + } while (store_cleaned);
2285 +
2286 + /* We didn't consume the entire budget, finish napi and
2287 + * re-enable data availability notifications.
2288 + */
2289 + napi_complete(napi);
2290 + do {
2291 + err = dpaa2_io_service_rearm(NULL, &ch->nctx);
2292 + cpu_relax();
2293 + } while (err == -EBUSY);
2294 +
2295 + return max(rx_cleaned, 1);
2296 +}
2297 +
2298 +static void enable_ch_napi(struct dpaa2_eth_priv *priv)
2299 +{
2300 + struct dpaa2_eth_channel *ch;
2301 + int i;
2302 +
2303 + for (i = 0; i < priv->num_channels; i++) {
2304 + ch = priv->channel[i];
2305 + napi_enable(&ch->napi);
2306 + }
2307 +}
2308 +
2309 +static void disable_ch_napi(struct dpaa2_eth_priv *priv)
2310 +{
2311 + struct dpaa2_eth_channel *ch;
2312 + int i;
2313 +
2314 + for (i = 0; i < priv->num_channels; i++) {
2315 + ch = priv->channel[i];
2316 + napi_disable(&ch->napi);
2317 + }
2318 +}
2319 +
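+/* Synchronize the netdev carrier and Tx queue state with the link state
+ * reported by the MC firmware.
+ */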
2320 +static int link_state_update(struct dpaa2_eth_priv *priv)
2321 +{
2322 + struct dpni_link_state state;
2323 + int err;
2324 +
2325 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
2326 + if (unlikely(err)) {
2327 + netdev_err(priv->net_dev,
2328 + "dpni_get_link_state() failed\n");
2329 + return err;
2330 + }
2331 +
2332 +	/* Check link state; speed / duplex changes are not treated yet */
2333 + if (priv->link_state.up == state.up)
2334 + return 0;
2335 +
2336 + priv->link_state = state;
2337 + if (state.up) {
2338 + netif_carrier_on(priv->net_dev);
2339 + netif_tx_start_all_queues(priv->net_dev);
2340 + } else {
2341 + netif_tx_stop_all_queues(priv->net_dev);
2342 + netif_carrier_off(priv->net_dev);
2343 + }
2344 +
2345 +	netdev_info(priv->net_dev, "Link Event: state %s\n",
2346 + state.up ? "up" : "down");
2347 +
2348 + return 0;
2349 +}
2350 +
2351 +static int dpaa2_eth_open(struct net_device *net_dev)
2352 +{
2353 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2354 + int err;
2355 +
2356 + /* We'll only start the txqs when the link is actually ready; make sure
2357 + * we don't race against the link up notification, which may come
2358 + * immediately after dpni_enable();
2359 + */
2360 + netif_tx_stop_all_queues(net_dev);
2361 +
2362 + /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
2363 + * return true and cause 'ip link show' to report the LOWER_UP flag,
2364 + * even though the link notification wasn't even received.
2365 + */
2366 + netif_carrier_off(net_dev);
2367 +
2368 + err = seed_pool(priv, priv->bpid);
2369 + if (err) {
2370 + /* Not much to do; the buffer pool, though not filled up,
2371 + * may still contain some buffers which would enable us
2372 + * to limp on.
2373 + */
2374 + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
2375 + priv->dpbp_dev->obj_desc.id, priv->bpid);
2376 + }
2377 +
2378 + if (priv->tx_pause_frames)
2379 + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
2380 + else
2381 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
2382 +
2383 + err = dpni_enable(priv->mc_io, 0, priv->mc_token);
2384 + if (err < 0) {
2385 + netdev_err(net_dev, "dpni_enable() failed\n");
2386 + goto enable_err;
2387 + }
2388 +
2389 + /* If the DPMAC object has already processed the link up interrupt,
2390 + * we have to learn the link state ourselves.
2391 + */
2392 + err = link_state_update(priv);
2393 + if (err < 0) {
2394 + netdev_err(net_dev, "Can't update link state\n");
2395 + goto link_state_err;
2396 + }
2397 +
2398 + return 0;
2399 +
2400 +link_state_err:
2401 +enable_err:
2402 + priv->refill_thresh = 0;
2403 + drain_pool(priv);
2404 + return err;
2405 +}
2406 +
2407 +static int dpaa2_eth_stop(struct net_device *net_dev)
2408 +{
2409 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2410 + int dpni_enabled;
2411 + int retries = 10, i;
2412 +
2413 + netif_tx_stop_all_queues(net_dev);
2414 + netif_carrier_off(net_dev);
2415 +
2416 + /* Loop while dpni_disable() attempts to drain the egress FQs
2417 + * and confirm them back to us.
2418 + */
2419 + do {
2420 + dpni_disable(priv->mc_io, 0, priv->mc_token);
2421 + dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
2422 + if (dpni_enabled)
2423 + /* Allow the MC some slack */
2424 + msleep(100);
2425 + } while (dpni_enabled && --retries);
2426 + if (!retries) {
2427 + netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
2428 + /* Must go on and disable NAPI nonetheless, so we don't crash at
2429 + * the next "ifconfig up"
2430 + */
2431 + }
2432 +
2433 + priv->refill_thresh = 0;
2434 +
2435 + /* Wait for all running napi poll routines to finish, so that no
2436 + * new refill operations are started.
2437 + */
2438 + for (i = 0; i < priv->num_channels; i++)
2439 + napi_synchronize(&priv->channel[i]->napi);
2440 +
2441 + /* Empty the buffer pool */
2442 + drain_pool(priv);
2443 +
2444 + return 0;
2445 +}
2446 +
2447 +static int dpaa2_eth_init(struct net_device *net_dev)
2448 +{
2449 + u64 supported = 0;
2450 + u64 not_supported = 0;
2451 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2452 + u32 options = priv->dpni_attrs.options;
2453 +
2454 + /* Capabilities listing */
2455 + supported |= IFF_LIVE_ADDR_CHANGE;
2456 +
2457 + if (options & DPNI_OPT_NO_MAC_FILTER)
2458 + not_supported |= IFF_UNICAST_FLT;
2459 + else
2460 + supported |= IFF_UNICAST_FLT;
2461 +
2462 + net_dev->priv_flags |= supported;
2463 + net_dev->priv_flags &= ~not_supported;
2464 +
2465 + /* Features */
2466 + net_dev->features = NETIF_F_RXCSUM |
2467 + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2468 + NETIF_F_SG | NETIF_F_HIGHDMA |
2469 + NETIF_F_LLTX;
2470 + net_dev->hw_features = net_dev->features;
2471 +
2472 + return 0;
2473 +}
2474 +
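+/* Set the primary MAC address on both the net_device and the DPNI */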
2475 +static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2476 +{
2477 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2478 + struct device *dev = net_dev->dev.parent;
2479 + int err;
2480 +
2481 + err = eth_mac_addr(net_dev, addr);
2482 + if (err < 0) {
2483 + dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2484 + return err;
2485 + }
2486 +
2487 + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2488 + net_dev->dev_addr);
2489 + if (err) {
2490 + dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2491 + return err;
2492 + }
2493 +
2494 + return 0;
2495 +}
2496 +
2497 +/** Fill in counters maintained by the GPP driver. These may be different from
2498 + * the hardware counters obtained by ethtool.
2499 + */
2500 +static void dpaa2_eth_get_stats(struct net_device *net_dev,
2501 + struct rtnl_link_stats64 *stats)
2502 +{
2503 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2504 + struct rtnl_link_stats64 *percpu_stats;
2505 + u64 *cpustats;
2506 + u64 *netstats = (u64 *)stats;
2507 + int i, j;
2508 + int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2509 +
2510 + for_each_possible_cpu(i) {
2511 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2512 + cpustats = (u64 *)percpu_stats;
2513 + for (j = 0; j < num; j++)
2514 + netstats[j] += cpustats[j];
2515 + }
2516 +}
2517 +
2518 +static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
2519 +{
2520 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2521 + int err;
2522 +
2523 + /* Set the maximum Rx frame length to match the transmit side;
2524 + * account for L2 headers when computing the MFL
2525 + */
2526 + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
2527 + (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
2528 + if (err) {
2529 + netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
2530 + return err;
2531 + }
2532 +
2533 + net_dev->mtu = mtu;
2534 + return 0;
2535 +}
2536 +
2537 +/* Copy mac unicast addresses from @net_dev to @priv.
2538 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2539 + */
2540 +static void add_uc_hw_addr(const struct net_device *net_dev,
2541 + struct dpaa2_eth_priv *priv)
2542 +{
2543 + struct netdev_hw_addr *ha;
2544 + int err;
2545 +
2546 + netdev_for_each_uc_addr(ha, net_dev) {
2547 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2548 + ha->addr);
2549 + if (err)
2550 + netdev_warn(priv->net_dev,
2551 + "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2552 + ha->addr, err);
2553 + }
2554 +}
2555 +
2556 +/* Copy mac multicast addresses from @net_dev to @priv
2557 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2558 + */
2559 +static void add_mc_hw_addr(const struct net_device *net_dev,
2560 + struct dpaa2_eth_priv *priv)
2561 +{
2562 + struct netdev_hw_addr *ha;
2563 + int err;
2564 +
2565 + netdev_for_each_mc_addr(ha, net_dev) {
2566 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2567 + ha->addr);
2568 + if (err)
2569 + netdev_warn(priv->net_dev,
2570 + "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2571 + ha->addr, err);
2572 + }
2573 +}
2574 +
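+/* Rebuild the MAC filtering tables and promiscuous settings so they mirror
+ * the net_device unicast/multicast address lists and flags.
+ */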
2575 +static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2576 +{
2577 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2578 + int uc_count = netdev_uc_count(net_dev);
2579 + int mc_count = netdev_mc_count(net_dev);
2580 + u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2581 + u32 options = priv->dpni_attrs.options;
2582 + u16 mc_token = priv->mc_token;
2583 + struct fsl_mc_io *mc_io = priv->mc_io;
2584 + int err;
2585 +
2586 + /* Basic sanity checks; these probably indicate a misconfiguration */
2587 + if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2588 + netdev_info(net_dev,
2589 + "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2590 + max_mac);
2591 +
2592 + /* Force promiscuous if the uc or mc counts exceed our capabilities. */
2593 + if (uc_count > max_mac) {
2594 + netdev_info(net_dev,
2595 + "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2596 + uc_count, max_mac);
2597 + goto force_promisc;
2598 + }
2599 + if (mc_count + uc_count > max_mac) {
2600 + netdev_info(net_dev,
2601 + "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2602 + uc_count + mc_count, max_mac);
2603 + goto force_mc_promisc;
2604 + }
2605 +
2606 + /* Adjust promisc settings due to flag combinations */
2607 + if (net_dev->flags & IFF_PROMISC)
2608 + goto force_promisc;
2609 + if (net_dev->flags & IFF_ALLMULTI) {
2610 +		/* First, rebuild the unicast filtering table. This should be done
2611 + * in promisc mode, in order to avoid frame loss while we
2612 + * progressively add entries to the table.
2613 + * We don't know whether we had been in promisc already, and
2614 + * making an MC call to find out is expensive; so set uc promisc
2615 + * nonetheless.
2616 + */
2617 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2618 + if (err)
2619 + netdev_warn(net_dev, "Can't set uc promisc\n");
2620 +
2621 + /* Actual uc table reconstruction. */
2622 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2623 + if (err)
2624 + netdev_warn(net_dev, "Can't clear uc filters\n");
2625 + add_uc_hw_addr(net_dev, priv);
2626 +
2627 + /* Finally, clear uc promisc and set mc promisc as requested. */
2628 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2629 + if (err)
2630 + netdev_warn(net_dev, "Can't clear uc promisc\n");
2631 + goto force_mc_promisc;
2632 + }
2633 +
2634 +	/* Neither unicast nor multicast promisc will be on... eventually.
2635 + * For now, rebuild mac filtering tables while forcing both of them on.
2636 + */
2637 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2638 + if (err)
2639 + netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2640 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2641 + if (err)
2642 + netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2643 +
2644 + /* Actual mac filtering tables reconstruction */
2645 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2646 + if (err)
2647 + netdev_warn(net_dev, "Can't clear mac filters\n");
2648 + add_mc_hw_addr(net_dev, priv);
2649 + add_uc_hw_addr(net_dev, priv);
2650 +
2651 + /* Now we can clear both ucast and mcast promisc, without risking
2652 + * to drop legitimate frames anymore.
2653 + */
2654 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2655 + if (err)
2656 + netdev_warn(net_dev, "Can't clear ucast promisc\n");
2657 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2658 + if (err)
2659 + netdev_warn(net_dev, "Can't clear mcast promisc\n");
2660 +
2661 + return;
2662 +
2663 +force_promisc:
2664 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2665 + if (err)
2666 + netdev_warn(net_dev, "Can't set ucast promisc\n");
2667 +force_mc_promisc:
2668 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2669 + if (err)
2670 + netdev_warn(net_dev, "Can't set mcast promisc\n");
2671 +}
2672 +
2673 +static int dpaa2_eth_set_features(struct net_device *net_dev,
2674 + netdev_features_t features)
2675 +{
2676 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2677 + netdev_features_t changed = features ^ net_dev->features;
2678 + bool enable;
2679 + int err;
2680 +
2681 + if (changed & NETIF_F_RXCSUM) {
2682 + enable = !!(features & NETIF_F_RXCSUM);
2683 + err = set_rx_csum(priv, enable);
2684 + if (err)
2685 + return err;
2686 + }
2687 +
2688 + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2689 + enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2690 + err = set_tx_csum(priv, enable);
2691 + if (err)
2692 + return err;
2693 + }
2694 +
2695 + return 0;
2696 +}
2697 +
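+/* SIOCSHWTSTAMP handler: enable/disable hardware timestamping on Tx and Rx */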
2698 +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2699 +{
2700 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
2701 + struct hwtstamp_config config;
2702 +
2703 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2704 + return -EFAULT;
2705 +
2706 + switch (config.tx_type) {
2707 + case HWTSTAMP_TX_OFF:
2708 + priv->ts_tx_en = false;
2709 + break;
2710 + case HWTSTAMP_TX_ON:
2711 + priv->ts_tx_en = true;
2712 + break;
2713 + default:
2714 + return -ERANGE;
2715 + }
2716 +
2717 + if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2718 + priv->ts_rx_en = false;
2719 + } else {
2720 + priv->ts_rx_en = true;
2721 + /* TS is set for all frame types, not only those requested */
2722 + config.rx_filter = HWTSTAMP_FILTER_ALL;
2723 + }
2724 +
2725 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2726 + -EFAULT : 0;
2727 +}
2728 +
2729 +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2730 +{
2731 + if (cmd == SIOCSHWTSTAMP)
2732 + return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2733 +
2734 + return -EINVAL;
2735 +}
2736 +
2737 +static const struct net_device_ops dpaa2_eth_ops = {
2738 + .ndo_open = dpaa2_eth_open,
2739 + .ndo_start_xmit = dpaa2_eth_tx,
2740 + .ndo_stop = dpaa2_eth_stop,
2741 + .ndo_init = dpaa2_eth_init,
2742 + .ndo_set_mac_address = dpaa2_eth_set_addr,
2743 + .ndo_get_stats64 = dpaa2_eth_get_stats,
2744 + .ndo_change_mtu = dpaa2_eth_change_mtu,
2745 + .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2746 + .ndo_set_features = dpaa2_eth_set_features,
2747 + .ndo_do_ioctl = dpaa2_eth_ioctl,
2748 +};
2749 +
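+/* Channel Data Availability Notification (CDAN) callback: schedule NAPI on
+ * the channel that raised the notification.
+ */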
2750 +static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2751 +{
2752 + struct dpaa2_eth_channel *ch;
2753 +
2754 + ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2755 +
2756 + /* Update NAPI statistics */
2757 + ch->stats.cdan++;
2758 +
2759 + napi_schedule_irqoff(&ch->napi);
2760 +}
2761 +
2762 +/* Allocate and configure a DPCON object */
2763 +static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
2764 +{
2765 + struct fsl_mc_device *dpcon;
2766 + struct device *dev = priv->net_dev->dev.parent;
2767 + struct dpcon_attr attrs;
2768 + int err;
2769 +
2770 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2771 + FSL_MC_POOL_DPCON, &dpcon);
2772 + if (err) {
2773 + dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2774 + return NULL;
2775 + }
2776 +
2777 + err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2778 + if (err) {
2779 + dev_err(dev, "dpcon_open() failed\n");
2780 + goto err_open;
2781 + }
2782 +
2783 + err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2784 + if (err) {
2785 + dev_err(dev, "dpcon_reset() failed\n");
2786 + goto err_reset;
2787 + }
2788 +
2789 + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
2790 + if (err) {
2791 + dev_err(dev, "dpcon_get_attributes() failed\n");
2792 + goto err_get_attr;
2793 + }
2794 +
2795 + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2796 + if (err) {
2797 + dev_err(dev, "dpcon_enable() failed\n");
2798 + goto err_enable;
2799 + }
2800 +
2801 + return dpcon;
2802 +
2803 +err_enable:
2804 +err_get_attr:
2805 +err_reset:
2806 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2807 +err_open:
2808 + fsl_mc_object_free(dpcon);
2809 +
2810 + return NULL;
2811 +}
2812 +
2813 +static void free_dpcon(struct dpaa2_eth_priv *priv,
2814 + struct fsl_mc_device *dpcon)
2815 +{
2816 + dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2817 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2818 + fsl_mc_object_free(dpcon);
2819 +}
2820 +
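+/* Allocate a software channel structure along with its backing DPCON object */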
2821 +static struct dpaa2_eth_channel *
2822 +alloc_channel(struct dpaa2_eth_priv *priv)
2823 +{
2824 + struct dpaa2_eth_channel *channel;
2825 + struct dpcon_attr attr;
2826 + struct device *dev = priv->net_dev->dev.parent;
2827 + int err;
2828 +
2829 + channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2830 + if (!channel)
2831 + return NULL;
2832 +
2833 + channel->dpcon = setup_dpcon(priv);
2834 + if (!channel->dpcon)
2835 + goto err_setup;
2836 +
2837 + err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2838 + &attr);
2839 + if (err) {
2840 + dev_err(dev, "dpcon_get_attributes() failed\n");
2841 + goto err_get_attr;
2842 + }
2843 +
2844 + channel->dpcon_id = attr.id;
2845 + channel->ch_id = attr.qbman_ch_id;
2846 + channel->priv = priv;
2847 +
2848 + return channel;
2849 +
2850 +err_get_attr:
2851 + free_dpcon(priv, channel->dpcon);
2852 +err_setup:
2853 + kfree(channel);
2854 + return NULL;
2855 +}
2856 +
2857 +static void free_channel(struct dpaa2_eth_priv *priv,
2858 + struct dpaa2_eth_channel *channel)
2859 +{
2860 + free_dpcon(priv, channel->dpcon);
2861 + kfree(channel);
2862 +}
2863 +
2864 +/* DPIO setup: allocate and configure QBMan channels, set up core affinity
2865 + * and register data availability notifications
2866 + */
2867 +static int setup_dpio(struct dpaa2_eth_priv *priv)
2868 +{
2869 + struct dpaa2_io_notification_ctx *nctx;
2870 + struct dpaa2_eth_channel *channel;
2871 + struct dpcon_notification_cfg dpcon_notif_cfg;
2872 + struct device *dev = priv->net_dev->dev.parent;
2873 + int i, err;
2874 +
2875 + /* We want the ability to spread ingress traffic (RX, TX conf) to as
2876 + * many cores as possible, so we need one channel for each core
2877 + * (unless there are fewer queues than cores, in which case the extra
2878 + * channels would be wasted).
2879 + * Allocate one channel per core and register it to the core's
2880 + * affine DPIO. If not enough channels are available for all cores
2881 + * or if some cores don't have an affine DPIO, there will be no
2882 + * ingress frame processing on those cores.
2883 + */
2884 + cpumask_clear(&priv->dpio_cpumask);
2885 + for_each_online_cpu(i) {
2886 + /* Try to allocate a channel */
2887 + channel = alloc_channel(priv);
2888 + if (!channel) {
2889 + dev_info(dev,
2890 + "No affine channel for cpu %d and above\n", i);
2891 + goto err_alloc_ch;
2892 + }
2893 +
2894 + priv->channel[priv->num_channels] = channel;
2895 +
2896 + nctx = &channel->nctx;
2897 + nctx->is_cdan = 1;
2898 + nctx->cb = cdan_cb;
2899 + nctx->id = channel->ch_id;
2900 + nctx->desired_cpu = i;
2901 +
2902 + /* Register the new context */
2903 + err = dpaa2_io_service_register(NULL, nctx);
2904 + if (err) {
2905 + dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2906 + /* If no affine DPIO for this core, there's probably
2907 + * none available for next cores either.
2908 + */
2909 + goto err_service_reg;
2910 + }
2911 +
2912 + /* Register DPCON notification with MC */
2913 + dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2914 + dpcon_notif_cfg.priority = 0;
2915 + dpcon_notif_cfg.user_ctx = nctx->qman64;
2916 + err = dpcon_set_notification(priv->mc_io, 0,
2917 + channel->dpcon->mc_handle,
2918 + &dpcon_notif_cfg);
2919 + if (err) {
2920 +			dev_err(dev, "dpcon_set_notification() failed\n");
2921 + goto err_set_cdan;
2922 + }
2923 +
2924 + /* If we managed to allocate a channel and also found an affine
2925 + * DPIO for this core, add it to the final mask
2926 + */
2927 + cpumask_set_cpu(i, &priv->dpio_cpumask);
2928 + priv->num_channels++;
2929 +
2930 + /* Stop if we already have enough channels to accommodate all
2931 + * RX and TX conf queues
2932 + */
2933 + if (priv->num_channels == dpaa2_eth_queue_count(priv))
2934 + break;
2935 + }
2936 +
2937 + /* Tx confirmation queues can only be serviced by cpus
2938 + * with an affine DPIO/channel
2939 + */
2940 + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
2941 +
2942 + return 0;
2943 +
2944 +err_set_cdan:
2945 + dpaa2_io_service_deregister(NULL, nctx);
2946 +err_service_reg:
2947 + free_channel(priv, channel);
2948 +err_alloc_ch:
2949 + if (cpumask_empty(&priv->dpio_cpumask)) {
2950 + dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n");
2951 + return -ENODEV;
2952 + }
2953 + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
2954 +
2955 + dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2956 + cpumask_pr_args(&priv->dpio_cpumask));
2957 +
2958 + return 0;
2959 +}
2960 +
2961 +static void free_dpio(struct dpaa2_eth_priv *priv)
2962 +{
2963 + int i;
2964 + struct dpaa2_eth_channel *ch;
2965 +
2966 + /* deregister CDAN notifications and free channels */
2967 + for (i = 0; i < priv->num_channels; i++) {
2968 + ch = priv->channel[i];
2969 + dpaa2_io_service_deregister(NULL, &ch->nctx);
2970 + free_channel(priv, ch);
2971 + }
2972 +}
2973 +
2974 +static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
2975 + int cpu)
2976 +{
2977 + struct device *dev = priv->net_dev->dev.parent;
2978 + int i;
2979 +
2980 + for (i = 0; i < priv->num_channels; i++)
2981 + if (priv->channel[i]->nctx.desired_cpu == cpu)
2982 + return priv->channel[i];
2983 +
2984 + /* We should never get here. Issue a warning and return
2985 + * the first channel, because it's still better than nothing
2986 + */
2987 + dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2988 +
2989 + return priv->channel[0];
2990 +}
2991 +
2992 +static void set_fq_affinity(struct dpaa2_eth_priv *priv)
2993 +{
2994 + struct device *dev = priv->net_dev->dev.parent;
2995 + struct cpumask xps_mask = CPU_MASK_NONE;
2996 + struct dpaa2_eth_fq *fq;
2997 + int rx_cpu, txc_cpu;
2998 + int i, err;
2999 +
3000 + /* For each FQ, pick one channel/CPU to deliver frames to.
3001 + * This may well change at runtime, either through irqbalance or
3002 + * through direct user intervention.
3003 + */
3004 + rx_cpu = cpumask_first(&priv->dpio_cpumask);
3005 + txc_cpu = cpumask_first(&priv->txconf_cpumask);
3006 +
3007 + for (i = 0; i < priv->num_fqs; i++) {
3008 + fq = &priv->fq[i];
3009 + switch (fq->type) {
3010 + case DPAA2_RX_FQ:
3011 + case DPAA2_RX_ERR_FQ:
3012 + fq->target_cpu = rx_cpu;
3013 + rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3014 + if (rx_cpu >= nr_cpu_ids)
3015 + rx_cpu = cpumask_first(&priv->dpio_cpumask);
3016 + break;
3017 + case DPAA2_TX_CONF_FQ:
3018 + fq->target_cpu = txc_cpu;
3019 +
3020 + /* register txc_cpu to XPS */
3021 + cpumask_set_cpu(txc_cpu, &xps_mask);
3022 + err = netif_set_xps_queue(priv->net_dev, &xps_mask,
3023 + fq->flowid);
3024 + if (err)
3025 + dev_info_once(dev,
3026 + "Tx: error setting XPS queue\n");
3027 + cpumask_clear_cpu(txc_cpu, &xps_mask);
3028 +
3029 + txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask);
3030 + if (txc_cpu >= nr_cpu_ids)
3031 + txc_cpu = cpumask_first(&priv->txconf_cpumask);
3032 + break;
3033 + default:
3034 + dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3035 + }
3036 + fq->channel = get_affine_channel(priv, fq->target_cpu);
3037 + }
3038 +}
3039 +
3040 +static void setup_fqs(struct dpaa2_eth_priv *priv)
3041 +{
3042 + int i;
3043 +
3044 + /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the
3045 + * beginning of the queue array.
3046 +	 * The number of Rx and Tx queues is the same.
3047 + * We only support one traffic class for now.
3048 + */
3049 + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3050 + priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3051 + priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3052 + priv->fq[priv->num_fqs++].flowid = (u16)i;
3053 + }
3054 +
3055 + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3056 + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3057 + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3058 + priv->fq[priv->num_fqs++].flowid = (u16)i;
3059 + }
3060 +
3061 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3062 + /* We have exactly one Rx error queue per DPNI */
3063 + priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3064 + priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3065 +#endif
3066 +
3067 + /* For each FQ, decide on which core to process incoming frames */
3068 + set_fq_affinity(priv);
3069 +}
3070 +
3071 +/* Allocate and configure one buffer pool for each interface */
3072 +static int setup_dpbp(struct dpaa2_eth_priv *priv)
3073 +{
3074 + int err;
3075 + struct fsl_mc_device *dpbp_dev;
3076 + struct dpbp_attr dpbp_attrs;
3077 + struct device *dev = priv->net_dev->dev.parent;
3078 +
3079 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3080 + &dpbp_dev);
3081 + if (err) {
3082 + dev_err(dev, "DPBP device allocation failed\n");
3083 + return err;
3084 + }
3085 +
3086 + priv->dpbp_dev = dpbp_dev;
3087 +
3088 + err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
3089 + &dpbp_dev->mc_handle);
3090 + if (err) {
3091 + dev_err(dev, "dpbp_open() failed\n");
3092 + goto err_open;
3093 + }
3094 +
3095 + err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3096 + if (err) {
3097 + dev_err(dev, "dpbp_reset() failed\n");
3098 + goto err_reset;
3099 + }
3100 +
3101 + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3102 + if (err) {
3103 + dev_err(dev, "dpbp_enable() failed\n");
3104 + goto err_enable;
3105 + }
3106 +
3107 + err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3108 + &dpbp_attrs);
3109 + if (err) {
3110 + dev_err(dev, "dpbp_get_attributes() failed\n");
3111 + goto err_get_attr;
3112 + }
3113 +
3114 + priv->bpid = dpbp_attrs.bpid;
3115 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
3116 +
3117 + return 0;
3118 +
3119 +err_get_attr:
3120 + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3121 +err_enable:
3122 +err_reset:
3123 + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3124 +err_open:
3125 + fsl_mc_object_free(dpbp_dev);
3126 +
3127 + return err;
3128 +}
3129 +
3130 +static void free_dpbp(struct dpaa2_eth_priv *priv)
3131 +{
3132 + drain_pool(priv);
3133 + dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3134 + dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3135 + fsl_mc_object_free(priv->dpbp_dev);
3136 +}
3137 +
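+/* Allocate and map the congestion state change notification (CSCN) area and
+ * configure Tx congestion notifications on the DPNI.
+ */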
3138 +static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
3139 +{
3140 + struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 };
3141 + struct device *dev = priv->net_dev->dev.parent;
3142 + int err;
3143 +
3144 + priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
3145 + GFP_KERNEL);
3146 + if (!priv->cscn_unaligned)
3147 + return -ENOMEM;
3148 +
3149 + priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
3150 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
3151 + DMA_FROM_DEVICE);
3152 + if (dma_mapping_error(dev, priv->cscn_dma)) {
3153 + dev_err(dev, "Error mapping CSCN memory area\n");
3154 + err = -ENOMEM;
3155 + goto err_dma_map;
3156 + }
3157 +
3158 + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
3159 + cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
3160 + cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
3161 + cong_notif_cfg.message_ctx = (u64)priv;
3162 + cong_notif_cfg.message_iova = priv->cscn_dma;
3163 + cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
3164 + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
3165 + DPNI_CONG_OPT_COHERENT_WRITE;
3166 + err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
3167 + DPNI_QUEUE_TX, 0,
3168 + &cong_notif_cfg);
3169 + if (err) {
3170 + dev_err(dev, "dpni_set_congestion_notification failed\n");
3171 + goto err_set_cong;
3172 + }
3173 +
3174 + return 0;
3175 +
3176 +err_set_cong:
3177 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3178 +err_dma_map:
3179 + kfree(priv->cscn_unaligned);
3180 +
3181 + return err;
3182 +}
3183 +
3184 +/* Configure the DPNI object this interface is associated with */
3185 +static int setup_dpni(struct fsl_mc_device *ls_dev)
3186 +{
3187 + struct device *dev = &ls_dev->dev;
3188 + struct dpaa2_eth_priv *priv;
3189 + struct net_device *net_dev;
3190 + struct dpni_buffer_layout buf_layout;
3191 + struct dpni_link_cfg cfg = {0};
3192 + int err;
3193 +
3194 + net_dev = dev_get_drvdata(dev);
3195 + priv = netdev_priv(net_dev);
3196 +
3197 + priv->dpni_id = ls_dev->obj_desc.id;
3198 +
3199 + /* get a handle for the DPNI object */
3200 + err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
3201 + if (err) {
3202 + dev_err(dev, "dpni_open() failed\n");
3203 + goto err_open;
3204 + }
3205 +
3206 + ls_dev->mc_io = priv->mc_io;
3207 + ls_dev->mc_handle = priv->mc_token;
3208 +
3209 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3210 + if (err) {
3211 + dev_err(dev, "dpni_reset() failed\n");
3212 + goto err_reset;
3213 + }
3214 +
3215 + err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3216 + &priv->dpni_attrs);
3217 +
3218 + if (err) {
3219 + dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3220 + goto err_get_attr;
3221 + }
3222 +
3223 + /* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf
3224 + * align value must be a multiple of 256.
3225 + */
3226 + priv->rx_buf_align =
3227 + priv->dpni_attrs.wriop_version & 0x3ff ?
3228 + DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1;
3229 +
3230 + /* Update number of logical FQs in netdev */
3231 + err = netif_set_real_num_tx_queues(net_dev,
3232 + dpaa2_eth_queue_count(priv));
3233 + if (err) {
3234 + dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err);
3235 + goto err_set_tx_queues;
3236 + }
3237 +
3238 + err = netif_set_real_num_rx_queues(net_dev,
3239 + dpaa2_eth_queue_count(priv));
3240 + if (err) {
3241 + dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err);
3242 + goto err_set_rx_queues;
3243 + }
3244 +
3245 + /* Configure buffer layouts */
3246 + /* rx buffer */
3247 + buf_layout.pass_parser_result = true;
3248 + buf_layout.pass_frame_status = true;
3249 + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3250 + buf_layout.data_align = priv->rx_buf_align;
3251 + buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM;
3252 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3253 + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3254 + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3255 + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3256 + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
3257 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3258 + DPNI_QUEUE_RX, &buf_layout);
3259 + if (err) {
3260 + dev_err(dev,
3261 + "dpni_set_buffer_layout(RX) failed\n");
3262 + goto err_buf_layout;
3263 + }
3264 +
3265 + /* tx buffer */
3266 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3267 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3268 + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
3269 + buf_layout.pass_timestamp = true;
3270 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3271 + DPNI_QUEUE_TX, &buf_layout);
3272 + if (err) {
3273 + dev_err(dev,
3274 + "dpni_set_buffer_layout(TX) failed\n");
3275 + goto err_buf_layout;
3276 + }
3277 +
3278 + /* tx-confirm buffer */
3279 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3280 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3281 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3282 + DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3283 + if (err) {
3284 + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3285 + goto err_buf_layout;
3286 + }
3287 +
3288 + /* Now that we've set our tx buffer layout, retrieve the minimum
3289 + * required tx data offset.
3290 + */
3291 + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3292 + &priv->tx_data_offset);
3293 + if (err) {
3294 + dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err);
3295 + goto err_data_offset;
3296 + }
3297 +
3298 + if ((priv->tx_data_offset % 64) != 0)
3299 + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
3300 + priv->tx_data_offset);
3301 +
3302 + /* Accommodate software annotation space (SWA) */
3303 + priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
3304 +
3305 + /* Enable congestion notifications for Tx queues */
3306 + err = setup_tx_congestion(priv);
3307 + if (err)
3308 + goto err_tx_cong;
3309 +
3310 + /* allocate classification rule space */
3311 + priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
3312 + dpaa2_eth_fs_count(priv), GFP_KERNEL);
3313 + if (!priv->cls_rule)
3314 + goto err_cls_rule;
3315 +
3316 + /* Enable flow control */
3317 + cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
3318 + priv->tx_pause_frames = 1;
3319 +
3320 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
3321 + if (err) {
3322 + netdev_err(net_dev, "ERROR %d setting link cfg", err);
3323 + goto err_set_link_cfg;
3324 + }
3325 +
3326 + return 0;
3327 +
3328 +err_set_link_cfg:
3329 +err_cls_rule:
3330 +err_tx_cong:
3331 +err_data_offset:
3332 +err_buf_layout:
3333 +err_set_rx_queues:
3334 +err_set_tx_queues:
3335 +err_get_attr:
3336 +err_reset:
3337 + dpni_close(priv->mc_io, 0, priv->mc_token);
3338 +err_open:
3339 + return err;
3340 +}
3341 +
3342 +static void free_dpni(struct dpaa2_eth_priv *priv)
3343 +{
3344 + struct device *dev = priv->net_dev->dev.parent;
3345 + int err;
3346 +
3347 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3348 + if (err)
3349 + netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3350 + err);
3351 +
3352 + dpni_close(priv->mc_io, 0, priv->mc_token);
3353 +
3354 + kfree(priv->cls_rule);
3355 +
3356 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3357 + kfree(priv->cscn_unaligned);
3358 +}
3359 +
3360 +int setup_fqs_taildrop(struct dpaa2_eth_priv *priv,
3361 + bool enable)
3362 +{
3363 + struct device *dev = priv->net_dev->dev.parent;
3364 + struct dpni_taildrop td;
3365 + int err = 0, i;
3366 +
3367 + td.enable = enable;
3368 + td.threshold = DPAA2_ETH_TAILDROP_THRESH;
3369 +
3370 + if (enable) {
3371 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
3372 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
3373 + } else {
3374 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
3375 + priv->num_channels;
3376 + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
3377 + }
3378 +
3379 + for (i = 0; i < priv->num_fqs; i++) {
3380 + if (priv->fq[i].type != DPAA2_RX_FQ)
3381 + continue;
3382 +
3383 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
3384 + DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
3385 + priv->fq[i].flowid, &td);
3386 + if (err) {
3387 + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
3388 + break;
3389 + }
3390 + }
3391 +
3392 + return err;
3393 +}
3394 +
3395 +static int setup_rx_flow(struct dpaa2_eth_priv *priv,
3396 + struct dpaa2_eth_fq *fq)
3397 +{
3398 + struct device *dev = priv->net_dev->dev.parent;
3399 + struct dpni_queue q = { { 0 } };
3400 + struct dpni_queue_id qid;
3401 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3402 + int err;
3403 +
3404 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3405 + DPNI_QUEUE_RX, 0, fq->flowid, &q, &qid);
3406 + if (err) {
3407 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3408 + return err;
3409 + }
3410 +
3411 + fq->fqid = qid.fqid;
3412 +
3413 + q.destination.id = fq->channel->dpcon_id;
3414 + q.destination.type = DPNI_DEST_DPCON;
3415 + q.destination.priority = 1;
3416 + q.user_context = (u64)fq;
3417 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3418 + DPNI_QUEUE_RX, 0, fq->flowid, q_opt, &q);
3419 + if (err) {
3420 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3421 + return err;
3422 + }
3423 +
3424 + return 0;
3425 +}
3426 +
3427 +static int setup_tx_flow(struct dpaa2_eth_priv *priv,
3428 + struct dpaa2_eth_fq *fq)
3429 +{
3430 + struct device *dev = priv->net_dev->dev.parent;
3431 + struct dpni_queue q = { { 0 } };
3432 + struct dpni_queue_id qid;
3433 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3434 + int err;
3435 +
3436 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3437 + DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid);
3438 + if (err) {
3439 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3440 + return err;
3441 + }
3442 +
3443 + fq->tx_qdbin = qid.qdbin;
3444 +
3445 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3446 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid);
3447 + if (err) {
3448 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3449 + return err;
3450 + }
3451 +
3452 + fq->fqid = qid.fqid;
3453 +
3454 + q.destination.id = fq->channel->dpcon_id;
3455 + q.destination.type = DPNI_DEST_DPCON;
3456 + q.destination.priority = 0;
3457 + q.user_context = (u64)fq;
3458 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3459 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q);
3460 + if (err) {
3461 +		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3462 + return err;
3463 + }
3464 +
3465 + return 0;
3466 +}
3467 +
3468 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3469 +static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3470 + struct dpaa2_eth_fq *fq)
3471 +{
3472 + struct device *dev = priv->net_dev->dev.parent;
3473 + struct dpni_queue q = { { 0 } };
3474 + struct dpni_queue_id qid;
3475 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3476 + int err;
3477 +
3478 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3479 + DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3480 + if (err) {
3481 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3482 + return err;
3483 + }
3484 +
3485 + fq->fqid = qid.fqid;
3486 +
3487 + q.destination.id = fq->channel->dpcon_id;
3488 + q.destination.type = DPNI_DEST_DPCON;
3489 + q.destination.priority = 1;
3490 + q.user_context = (u64)fq;
3491 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3492 + DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3493 + if (err) {
3494 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3495 + return err;
3496 + }
3497 +
3498 + return 0;
3499 +}
3500 +#endif
3501 +
3502 +/* default hash key fields */
3503 +static struct dpaa2_eth_hash_fields default_hash_fields[] = {
3504 + {
3505 + /* L2 header */
3506 + .rxnfc_field = RXH_L2DA,
3507 + .cls_prot = NET_PROT_ETH,
3508 + .cls_field = NH_FLD_ETH_DA,
3509 + .size = 6,
3510 + }, {
3511 + .cls_prot = NET_PROT_ETH,
3512 + .cls_field = NH_FLD_ETH_SA,
3513 + .size = 6,
3514 + }, {
3515 + /* This is the last ethertype field parsed:
3516 + * depending on frame format, it can be the MAC ethertype
3517 + * or the VLAN etype.
3518 + */
3519 + .cls_prot = NET_PROT_ETH,
3520 + .cls_field = NH_FLD_ETH_TYPE,
3521 + .size = 2,
3522 + }, {
3523 + /* VLAN header */
3524 + .rxnfc_field = RXH_VLAN,
3525 + .cls_prot = NET_PROT_VLAN,
3526 + .cls_field = NH_FLD_VLAN_TCI,
3527 + .size = 2,
3528 + }, {
3529 + /* IP header */
3530 + .rxnfc_field = RXH_IP_SRC,
3531 + .cls_prot = NET_PROT_IP,
3532 + .cls_field = NH_FLD_IP_SRC,
3533 + .size = 4,
3534 + }, {
3535 + .rxnfc_field = RXH_IP_DST,
3536 + .cls_prot = NET_PROT_IP,
3537 + .cls_field = NH_FLD_IP_DST,
3538 + .size = 4,
3539 + }, {
3540 + .rxnfc_field = RXH_L3_PROTO,
3541 + .cls_prot = NET_PROT_IP,
3542 + .cls_field = NH_FLD_IP_PROTO,
3543 + .size = 1,
3544 + }, {
3545 + /* Using UDP ports, this is functionally equivalent to raw
3546 + * byte pairs from L4 header.
3547 + */
3548 + .rxnfc_field = RXH_L4_B_0_1,
3549 + .cls_prot = NET_PROT_UDP,
3550 + .cls_field = NH_FLD_UDP_PORT_SRC,
3551 + .size = 2,
3552 + }, {
3553 + .rxnfc_field = RXH_L4_B_2_3,
3554 + .cls_prot = NET_PROT_UDP,
3555 + .cls_field = NH_FLD_UDP_PORT_DST,
3556 + .size = 2,
3557 + },
3558 +};
3559 +
3560 +/* Set RX hash options */
3561 +static int set_hash(struct dpaa2_eth_priv *priv)
3562 +{
3563 + struct device *dev = priv->net_dev->dev.parent;
3564 + struct dpkg_profile_cfg cls_cfg;
3565 + struct dpni_rx_tc_dist_cfg dist_cfg;
3566 + u8 *dma_mem;
3567 + int i;
3568 + int err = 0;
3569 +
3570 + memset(&cls_cfg, 0, sizeof(cls_cfg));
3571 +
3572 + for (i = 0; i < priv->num_hash_fields; i++) {
3573 + struct dpkg_extract *key =
3574 + &cls_cfg.extracts[cls_cfg.num_extracts];
3575 +
3576 + key->type = DPKG_EXTRACT_FROM_HDR;
3577 + key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot;
3578 + key->extract.from_hdr.type = DPKG_FULL_FIELD;
3579 + key->extract.from_hdr.field = priv->hash_fields[i].cls_field;
3580 + cls_cfg.num_extracts++;
3581 +
3582 + priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field;
3583 + }
3584 +
3585 + dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
3586 + if (!dma_mem)
3587 + return -ENOMEM;
3588 +
3589 + err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3590 + if (err) {
3591 + dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err);
3592 + goto err_prep_key;
3593 + }
3594 +
3595 + memset(&dist_cfg, 0, sizeof(dist_cfg));
3596 +
3597 + /* Prepare for setting the rx dist */
3598 + dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3599 + DPAA2_CLASSIFIER_DMA_SIZE,
3600 + DMA_TO_DEVICE);
3601 + if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
3602 + dev_err(dev, "DMA mapping failed\n");
3603 + err = -ENOMEM;
3604 + goto err_dma_map;
3605 + }
3606 +
3607 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3608 + if (dpaa2_eth_fs_enabled(priv)) {
3609 + dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
3610 + dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
3611 + } else {
3612 + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3613 + }
3614 +
3615 + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
3616 + dma_unmap_single(dev, dist_cfg.key_cfg_iova,
3617 + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
3618 + if (err)
3619 + dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err);
3620 +
3621 +err_dma_map:
3622 +err_prep_key:
3623 + kfree(dma_mem);
3624 + return err;
3625 +}
3626 +
3627 +/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3628 + * frame queues and channels
3629 + */
3630 +static int bind_dpni(struct dpaa2_eth_priv *priv)
3631 +{
3632 + struct net_device *net_dev = priv->net_dev;
3633 + struct device *dev = net_dev->dev.parent;
3634 + struct dpni_pools_cfg pools_params;
3635 + struct dpni_error_cfg err_cfg;
3636 + int err = 0;
3637 + int i;
3638 +
3639 + pools_params.num_dpbp = 1;
3640 + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3641 + pools_params.pools[0].backup_pool = 0;
3642 + pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
3643 + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3644 + if (err) {
3645 + dev_err(dev, "dpni_set_pools() failed\n");
3646 + return err;
3647 + }
3648 +
3649 + /* Verify classification options and disable hashing and/or
3650 + * flow steering support in case of invalid configuration values
3651 + */
3652 + priv->hash_fields = default_hash_fields;
3653 + priv->num_hash_fields = ARRAY_SIZE(default_hash_fields);
3654 + check_cls_support(priv);
3655 +
3656 + /* have the interface implicitly distribute traffic based on
3657 + * a static hash key
3658 + */
3659 + if (dpaa2_eth_hash_enabled(priv)) {
3660 + err = set_hash(priv);
3661 + if (err) {
3662 + dev_err(dev, "Hashing configuration failed\n");
3663 + return err;
3664 + }
3665 + }
3666 +
3667 + /* Configure handling of error frames */
3668 + err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3669 + err_cfg.set_frame_annotation = 1;
3670 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3671 + err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
3672 +#else
3673 + err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3674 +#endif
3675 + err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3676 + &err_cfg);
3677 + if (err) {
3678 + dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err);
3679 + return err;
3680 + }
3681 +
3682 + /* Configure Rx and Tx conf queues to generate CDANs */
3683 + for (i = 0; i < priv->num_fqs; i++) {
3684 + switch (priv->fq[i].type) {
3685 + case DPAA2_RX_FQ:
3686 + err = setup_rx_flow(priv, &priv->fq[i]);
3687 + break;
3688 + case DPAA2_TX_CONF_FQ:
3689 + err = setup_tx_flow(priv, &priv->fq[i]);
3690 + break;
3691 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3692 + case DPAA2_RX_ERR_FQ:
3693 + err = setup_rx_err_flow(priv, &priv->fq[i]);
3694 + break;
3695 +#endif
3696 + default:
3697 + dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3698 + return -EINVAL;
3699 + }
3700 + if (err)
3701 + return err;
3702 + }
3703 +
3704 + err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX,
3705 + &priv->tx_qdid);
3706 + if (err) {
3707 + dev_err(dev, "dpni_get_qdid() failed\n");
3708 + return err;
3709 + }
3710 +
3711 + return 0;
3712 +}
3713 +
3714 +/* Allocate rings for storing incoming frame descriptors */
3715 +static int alloc_rings(struct dpaa2_eth_priv *priv)
3716 +{
3717 + struct net_device *net_dev = priv->net_dev;
3718 + struct device *dev = net_dev->dev.parent;
3719 + int i;
3720 +
3721 + for (i = 0; i < priv->num_channels; i++) {
3722 + priv->channel[i]->store =
3723 + dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3724 + if (!priv->channel[i]->store) {
3725 + netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3726 + goto err_ring;
3727 + }
3728 + }
3729 +
3730 + return 0;
3731 +
3732 +err_ring:
3733 + for (i = 0; i < priv->num_channels; i++) {
3734 + if (!priv->channel[i]->store)
3735 + break;
3736 + dpaa2_io_store_destroy(priv->channel[i]->store);
3737 + }
3738 +
3739 + return -ENOMEM;
3740 +}
3741 +
3742 +static void free_rings(struct dpaa2_eth_priv *priv)
3743 +{
3744 + int i;
3745 +
3746 + for (i = 0; i < priv->num_channels; i++)
3747 + dpaa2_io_store_destroy(priv->channel[i]->store);
3748 +}
3749 +
3750 +static int netdev_init(struct net_device *net_dev)
3751 +{
3752 + int err;
3753 + struct device *dev = net_dev->dev.parent;
3754 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3755 + u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3756 + u8 bcast_addr[ETH_ALEN];
3757 + u16 rx_headroom, rx_req_headroom;
3758 +
3759 + net_dev->netdev_ops = &dpaa2_eth_ops;
3760 +
3761 + /* Get firmware address, if any */
3762 + err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3763 + if (err) {
3764 + dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err);
3765 + return err;
3766 + }
3767 +
3768 +	/* Get the DPNI's primary MAC address, if any */
3769 + err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3770 + dpni_mac_addr);
3771 + if (err) {
3772 + dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
3773 + return err;
3774 + }
3775 +
3776 + /* First check if firmware has any address configured by bootloader */
3777 + if (!is_zero_ether_addr(mac_addr)) {
3778 + /* If the DPMAC addr != the DPNI addr, update it */
3779 + if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3780 + err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3781 + priv->mc_token,
3782 + mac_addr);
3783 + if (err) {
3784 + dev_err(dev,
3785 + "dpni_set_primary_mac_addr() failed (%d)\n",
3786 + err);
3787 + return err;
3788 + }
3789 + }
3790 + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3791 + } else if (is_zero_ether_addr(dpni_mac_addr)) {
3792 + /* Fills in net_dev->dev_addr, as required by
3793 + * register_netdevice()
3794 + */
3795 + eth_hw_addr_random(net_dev);
3796 + /* Make the user aware, without cluttering the boot log */
3797 +		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
3798 + err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3799 + priv->mc_token, net_dev->dev_addr);
3800 + if (err) {
3801 + dev_err(dev,
3802 + "dpni_set_primary_mac_addr() failed (%d)\n", err);
3803 + return err;
3804 + }
3805 + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
3806 + * practical purposes, this will be our "permanent" mac address,
3807 + * at least until the next reboot. This move will also permit
3808 +		 * register_netdevice() to properly fill in net_dev->perm_addr.
3809 + */
3810 + net_dev->addr_assign_type = NET_ADDR_PERM;
3811 +	/* Otherwise, use the MAC address already configured on the DPNI */
3812 + } else {
3813 + /* NET_ADDR_PERM is default, all we have to do is
3814 + * fill in the device addr.
3815 + */
3816 + memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
3817 + }
3818 +
3819 + /* Explicitly add the broadcast address to the MAC filtering table;
3820 + * the MC won't do that for us.
3821 + */
3822 + eth_broadcast_addr(bcast_addr);
3823 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
3824 + if (err) {
3825 + dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
3826 +		/* Not a fatal error; at least egress traffic will still work */
3827 + }
3828 +
3829 + /* Reserve enough space to align buffer as per hardware requirement;
3830 + * NOTE: priv->tx_data_offset MUST be initialized at this point.
3831 + */
3832 + net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
3833 +
3834 + /* Set MTU limits */
3835 + net_dev->min_mtu = 68;
3836 + net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
3837 +
3838 + /* Required headroom for Rx skbs, to avoid reallocation on
3839 + * forwarding path.
3840 + */
3841 + rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
3842 + rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE +
3843 + DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align);
3844 + if (rx_req_headroom > rx_headroom)
3845 + dev_info_once(dev,
3846 + "Required headroom (%d) greater than available (%d).\n"
3847 + "This will impact performance due to reallocations.\n",
3848 + rx_req_headroom, rx_headroom);
3849 +
3850 + /* Our .ndo_init will be called herein */
3851 + err = register_netdev(net_dev);
3852 + if (err < 0) {
3853 + dev_err(dev, "register_netdev() failed (%d)\n", err);
3854 + return err;
3855 + }
3856 +
3857 + return 0;
3858 +}
3859 +
3860 +static int poll_link_state(void *arg)
3861 +{
3862 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
3863 + int err;
3864 +
3865 + while (!kthread_should_stop()) {
3866 + err = link_state_update(priv);
3867 + if (unlikely(err))
3868 + return err;
3869 +
3870 + msleep(DPAA2_ETH_LINK_STATE_REFRESH);
3871 + }
3872 +
3873 + return 0;
3874 +}
3875 +
3876 +static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
3877 +{
3878 + return IRQ_WAKE_THREAD;
3879 +}
3880 +
3881 +static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3882 +{
3883 + u32 status = 0, clear = 0;
3884 + struct device *dev = (struct device *)arg;
3885 + struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3886 + struct net_device *net_dev = dev_get_drvdata(dev);
3887 + int err;
3888 +
3889 + err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3890 + DPNI_IRQ_INDEX, &status);
3891 + if (unlikely(err)) {
3892 + netdev_err(net_dev, "Can't get irq status (err %d)", err);
3893 + clear = 0xffffffff;
3894 + goto out;
3895 + }
3896 +
3897 + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
3898 + clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
3899 + link_state_update(netdev_priv(net_dev));
3900 + }
3901 +
3902 +out:
3903 + dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3904 + DPNI_IRQ_INDEX, clear);
3905 + return IRQ_HANDLED;
3906 +}
3907 +
3908 +static int setup_irqs(struct fsl_mc_device *ls_dev)
3909 +{
3910 + int err = 0;
3911 + struct fsl_mc_device_irq *irq;
3912 +
3913 + err = fsl_mc_allocate_irqs(ls_dev);
3914 + if (err) {
3915 + dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
3916 + return err;
3917 + }
3918 +
3919 + irq = ls_dev->irqs[0];
3920 + err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
3921 + dpni_irq0_handler,
3922 + dpni_irq0_handler_thread,
3923 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
3924 + dev_name(&ls_dev->dev), &ls_dev->dev);
3925 + if (err < 0) {
3926 + dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
3927 + goto free_mc_irq;
3928 + }
3929 +
3930 + err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
3931 + DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
3932 + if (err < 0) {
3933 + dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
3934 + goto free_irq;
3935 + }
3936 +
3937 + err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
3938 + DPNI_IRQ_INDEX, 1);
3939 + if (err < 0) {
3940 + dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
3941 + goto free_irq;
3942 + }
3943 +
3944 + return 0;
3945 +
3946 +free_irq:
3947 + devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
3948 +free_mc_irq:
3949 + fsl_mc_free_irqs(ls_dev);
3950 +
3951 + return err;
3952 +}
3953 +
3954 +static void add_ch_napi(struct dpaa2_eth_priv *priv)
3955 +{
3956 + int i;
3957 + struct dpaa2_eth_channel *ch;
3958 +
3959 + for (i = 0; i < priv->num_channels; i++) {
3960 + ch = priv->channel[i];
3961 + /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
3962 + netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
3963 + NAPI_POLL_WEIGHT);
3964 + }
3965 +}
3966 +
3967 +static void del_ch_napi(struct dpaa2_eth_priv *priv)
3968 +{
3969 + int i;
3970 + struct dpaa2_eth_channel *ch;
3971 +
3972 + for (i = 0; i < priv->num_channels; i++) {
3973 + ch = priv->channel[i];
3974 + netif_napi_del(&ch->napi);
3975 + }
3976 +}
3977 +
3978 +/* SysFS support */
3979 +static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
3980 + struct device_attribute *attr,
3981 + char *buf)
3982 +{
3983 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
3984 + /* No MC API for getting the shaping config. We're stateful. */
3985 + struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
3986 +
3987 + return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
3988 +}
3989 +
3990 +static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
3991 + struct device_attribute *attr,
3992 + const char *buf,
3993 + size_t count)
3994 +{
3995 + int err, items;
3996 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
3997 + struct dpni_tx_shaping_cfg scfg;
3998 +
3999 + items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
4000 + if (items != 2) {
4001 + pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
4002 + return -EINVAL;
4003 + }
4004 + /* Size restriction as per MC API documentation */
4005 + if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
4006 + pr_err("max_burst_size must be <= %d\n",
4007 + DPAA2_ETH_MAX_BURST_SIZE);
4008 + return -EINVAL;
4009 + }
4010 +
4011 + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg);
4012 + if (err) {
4013 + dev_err(dev, "dpni_set_tx_shaping() failed\n");
4014 + return -EPERM;
4015 + }
4016 + /* If successful, save the current configuration for future inquiries */
4017 + priv->shaping_cfg = scfg;
4018 +
4019 + return count;
4020 +}
4021 +
4022 +static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev,
4023 + struct device_attribute *attr,
4024 + char *buf)
4025 +{
4026 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4027 +
4028 + return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask);
4029 +}
4030 +
4031 +static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev,
4032 + struct device_attribute *attr,
4033 + const char *buf,
4034 + size_t count)
4035 +{
4036 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4037 + struct dpaa2_eth_fq *fq;
4038 + bool running = netif_running(priv->net_dev);
4039 + int i, err;
4040 +
4041 + err = cpulist_parse(buf, &priv->txconf_cpumask);
4042 + if (err)
4043 + return err;
4044 +
4045 + /* Only accept CPUs that have an affine DPIO */
4046 + if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) {
4047 + netdev_info(priv->net_dev,
4048 + "cpumask must be a subset of 0x%lx\n",
4049 + *cpumask_bits(&priv->dpio_cpumask));
4050 + cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask,
4051 + &priv->txconf_cpumask);
4052 + }
4053 +
4054 + /* Rewiring the TxConf FQs requires interface shutdown.
4055 + */
4056 + if (running) {
4057 + err = dpaa2_eth_stop(priv->net_dev);
4058 + if (err)
4059 + return -ENODEV;
4060 + }
4061 +
4062 + /* Set the new TxConf FQ affinities */
4063 + set_fq_affinity(priv);
4064 +
4065 + /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
4066 + * link up notification is received. Give the polling thread enough time
4067 + * to detect the link state change, or else we'll end up with the
4068 + * transmission side forever shut down.
4069 + */
4070 + if (priv->do_link_poll)
4071 + msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
4072 +
4073 + for (i = 0; i < priv->num_fqs; i++) {
4074 + fq = &priv->fq[i];
4075 + if (fq->type != DPAA2_TX_CONF_FQ)
4076 + continue;
4077 + setup_tx_flow(priv, fq);
4078 + }
4079 +
4080 + if (running) {
4081 + err = dpaa2_eth_open(priv->net_dev);
4082 + if (err)
4083 + return -ENODEV;
4084 + }
4085 +
4086 + return count;
4087 +}
4088 +
4089 +static struct device_attribute dpaa2_eth_attrs[] = {
4090 + __ATTR(txconf_cpumask,
4091 + 0600,
4092 + dpaa2_eth_show_txconf_cpumask,
4093 + dpaa2_eth_write_txconf_cpumask),
4094 +
4095 + __ATTR(tx_shaping,
4096 + 0600,
4097 + dpaa2_eth_show_tx_shaping,
4098 + dpaa2_eth_write_tx_shaping),
4099 +};
4100 +
4101 +static void dpaa2_eth_sysfs_init(struct device *dev)
4102 +{
4103 + int i, err;
4104 +
4105 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
4106 + err = device_create_file(dev, &dpaa2_eth_attrs[i]);
4107 + if (err) {
4108 + dev_err(dev, "ERROR creating sysfs file\n");
4109 + goto undo;
4110 + }
4111 + }
4112 + return;
4113 +
4114 +undo:
4115 + while (i > 0)
4116 + device_remove_file(dev, &dpaa2_eth_attrs[--i]);
4117 +}
4118 +
4119 +static void dpaa2_eth_sysfs_remove(struct device *dev)
4120 +{
4121 + int i;
4122 +
4123 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
4124 + device_remove_file(dev, &dpaa2_eth_attrs[i]);
4125 +}
4126 +
4127 +static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4128 +{
4129 + struct device *dev;
4130 + struct net_device *net_dev = NULL;
4131 + struct dpaa2_eth_priv *priv = NULL;
4132 + int err = 0;
4133 +
4134 + dev = &dpni_dev->dev;
4135 +
4136 + /* Net device */
4137 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
4138 + if (!net_dev) {
4139 + dev_err(dev, "alloc_etherdev_mq() failed\n");
4140 + return -ENOMEM;
4141 + }
4142 +
4143 + SET_NETDEV_DEV(net_dev, dev);
4144 + dev_set_drvdata(dev, net_dev);
4145 +
4146 + priv = netdev_priv(net_dev);
4147 + priv->net_dev = net_dev;
4148 +
4149 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
4150 +
4151 + /* Obtain a MC portal */
4152 + err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4153 + &priv->mc_io);
4154 + if (err) {
4155 + dev_err(dev, "MC portal allocation failed\n");
4156 + goto err_portal_alloc;
4157 + }
4158 +
4159 + /* MC objects initialization and configuration */
4160 + err = setup_dpni(dpni_dev);
4161 + if (err)
4162 + goto err_dpni_setup;
4163 +
4164 + err = setup_dpio(priv);
4165 + if (err) {
4166 + dev_info(dev, "Defer probing as no DPIO available\n");
4167 + err = -EPROBE_DEFER;
4168 + goto err_dpio_setup;
4169 + }
4170 +
4171 + setup_fqs(priv);
4172 +
4173 + err = setup_dpbp(priv);
4174 + if (err)
4175 + goto err_dpbp_setup;
4176 +
4177 + err = bind_dpni(priv);
4178 + if (err)
4179 + goto err_bind;
4180 +
4181 + /* Add a NAPI context for each channel */
4182 + add_ch_napi(priv);
4183 + enable_ch_napi(priv);
4184 +
4185 + /* Percpu statistics */
4186 + priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4187 + if (!priv->percpu_stats) {
4188 + dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4189 + err = -ENOMEM;
4190 + goto err_alloc_percpu_stats;
4191 + }
4192 + priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4193 + if (!priv->percpu_extras) {
4194 + dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4195 + err = -ENOMEM;
4196 + goto err_alloc_percpu_extras;
4197 + }
4198 +
4199 + snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id);
4200 + if (!dev_valid_name(net_dev->name)) {
4201 + dev_warn(&net_dev->dev,
4202 +			 "netdevice name \"%s\" cannot be used, reverting to default...\n",
4203 + net_dev->name);
4204 + dev_alloc_name(net_dev, "eth%d");
4205 + dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
4206 + }
4207 +
4208 + err = netdev_init(net_dev);
4209 + if (err)
4210 + goto err_netdev_init;
4211 +
4212 + /* Configure checksum offload based on current interface flags */
4213 + err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4214 + if (err)
4215 + goto err_csum;
4216 +
4217 + err = set_tx_csum(priv, !!(net_dev->features &
4218 + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4219 + if (err)
4220 + goto err_csum;
4221 +
4222 + err = alloc_rings(priv);
4223 + if (err)
4224 + goto err_alloc_rings;
4225 +
4226 + net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4227 +
4228 + err = setup_irqs(dpni_dev);
4229 + if (err) {
4230 +		netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
4231 + priv->poll_thread = kthread_run(poll_link_state, priv,
4232 + "%s_poll_link", net_dev->name);
4233 + if (IS_ERR(priv->poll_thread)) {
4234 + netdev_err(net_dev, "Error starting polling thread\n");
4235 + goto err_poll_thread;
4236 + }
4237 + priv->do_link_poll = true;
4238 + }
4239 +
4240 + dpaa2_eth_sysfs_init(&net_dev->dev);
4241 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4242 + dpaa2_dbg_add(priv);
4243 +#endif
4244 +
4245 + dev_info(dev, "Probed interface %s\n", net_dev->name);
4246 + return 0;
4247 +
4248 +err_poll_thread:
4249 + free_rings(priv);
4250 +err_alloc_rings:
4251 +err_csum:
4252 + unregister_netdev(net_dev);
4253 +err_netdev_init:
4254 + free_percpu(priv->percpu_extras);
4255 +err_alloc_percpu_extras:
4256 + free_percpu(priv->percpu_stats);
4257 +err_alloc_percpu_stats:
4258 + disable_ch_napi(priv);
4259 + del_ch_napi(priv);
4260 +err_bind:
4261 + free_dpbp(priv);
4262 +err_dpbp_setup:
4263 + free_dpio(priv);
4264 +err_dpio_setup:
4265 + free_dpni(priv);
4266 +err_dpni_setup:
4267 + fsl_mc_portal_free(priv->mc_io);
4268 +err_portal_alloc:
4269 + dev_set_drvdata(dev, NULL);
4270 + free_netdev(net_dev);
4271 +
4272 + return err;
4273 +}
4274 +
4275 +static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4276 +{
4277 + struct device *dev;
4278 + struct net_device *net_dev;
4279 + struct dpaa2_eth_priv *priv;
4280 +
4281 + dev = &ls_dev->dev;
4282 + net_dev = dev_get_drvdata(dev);
4283 + priv = netdev_priv(net_dev);
4284 +
4285 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4286 + dpaa2_dbg_remove(priv);
4287 +#endif
4288 + dpaa2_eth_sysfs_remove(&net_dev->dev);
4289 +
4290 + unregister_netdev(net_dev);
4291 + dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
4292 +
4293 + if (priv->do_link_poll)
4294 + kthread_stop(priv->poll_thread);
4295 + else
4296 + fsl_mc_free_irqs(ls_dev);
4297 +
4298 + free_rings(priv);
4299 + free_percpu(priv->percpu_stats);
4300 + free_percpu(priv->percpu_extras);
4301 +
4302 + disable_ch_napi(priv);
4303 + del_ch_napi(priv);
4304 + free_dpbp(priv);
4305 + free_dpio(priv);
4306 + free_dpni(priv);
4307 +
4308 + fsl_mc_portal_free(priv->mc_io);
4309 +
4310 + dev_set_drvdata(dev, NULL);
4311 + free_netdev(net_dev);
4312 +
4313 + return 0;
4314 +}
4315 +
4316 +static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4317 + {
4318 + .vendor = FSL_MC_VENDOR_FREESCALE,
4319 + .obj_type = "dpni",
4320 + },
4321 + { .vendor = 0x0 }
4322 +};
4323 +MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4324 +
4325 +static struct fsl_mc_driver dpaa2_eth_driver = {
4326 + .driver = {
4327 + .name = KBUILD_MODNAME,
4328 + .owner = THIS_MODULE,
4329 + },
4330 + .probe = dpaa2_eth_probe,
4331 + .remove = dpaa2_eth_remove,
4332 + .match_id_table = dpaa2_eth_match_id_table
4333 +};
4334 +
4335 +static int __init dpaa2_eth_driver_init(void)
4336 +{
4337 + int err;
4338 +
4339 + dpaa2_eth_dbg_init();
4340 + err = fsl_mc_driver_register(&dpaa2_eth_driver);
4341 + if (err) {
4342 + dpaa2_eth_dbg_exit();
4343 + return err;
4344 + }
4345 +
4346 + return 0;
4347 +}
4348 +
4349 +static void __exit dpaa2_eth_driver_exit(void)
4350 +{
4351 + dpaa2_eth_dbg_exit();
4352 + fsl_mc_driver_unregister(&dpaa2_eth_driver);
4353 +}
4354 +
4355 +module_init(dpaa2_eth_driver_init);
4356 +module_exit(dpaa2_eth_driver_exit);
4357 --- /dev/null
4358 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
4359 @@ -0,0 +1,460 @@
4360 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
4361 + *
4362 + * Redistribution and use in source and binary forms, with or without
4363 + * modification, are permitted provided that the following conditions are met:
4364 + * * Redistributions of source code must retain the above copyright
4365 + * notice, this list of conditions and the following disclaimer.
4366 + * * Redistributions in binary form must reproduce the above copyright
4367 + * notice, this list of conditions and the following disclaimer in the
4368 + * documentation and/or other materials provided with the distribution.
4369 + * * Neither the name of Freescale Semiconductor nor the
4370 + * names of its contributors may be used to endorse or promote products
4371 + * derived from this software without specific prior written permission.
4372 + *
4373 + *
4374 + * ALTERNATIVELY, this software may be distributed under the terms of the
4375 + * GNU General Public License ("GPL") as published by the Free Software
4376 + * Foundation, either version 2 of that License or (at your option) any
4377 + * later version.
4378 + *
4379 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
4380 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
4381 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
4382 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
4383 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
4384 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
4385 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4386 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4387 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4388 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4389 + */
4390 +
4391 +#ifndef __DPAA2_ETH_H
4392 +#define __DPAA2_ETH_H
4393 +
4394 +#include <linux/atomic.h>
4395 +#include <linux/netdevice.h>
4396 +#include <linux/if_vlan.h>
4397 +#include "../../fsl-mc/include/dpaa2-io.h"
4398 +#include "dpni.h"
4399 +#include "net.h"
4400 +
4401 +#include "dpaa2-eth-debugfs.h"
4402 +
4403 +#define DPAA2_ETH_STORE_SIZE 16
4404 +
4405 +/* We set a max threshold for how many Tx confirmations we should process
4406 + * on a NAPI poll call, since they take less processing time than Rx frames.
4407 + */
4408 +#define TX_CONF_PER_NAPI_POLL 256
4409 +
4410 +/* Maximum number of scatter-gather entries in an ingress frame,
4411 + * considering the maximum receive frame size is 64K
4412 + */
4413 +#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
4414 +
4415 +/* Maximum acceptable MTU value. It is in direct relation with the hardware
4416 + * enforced Max Frame Length (currently 10k).
4417 + */
4418 +#define DPAA2_ETH_MFL (10 * 1024)
4419 +#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
4420 +/* Convert L3 MTU to L2 MFL */
4421 +#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
4422 +
4423 +/* Maximum burst size value for Tx shaping */
4424 +#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
4425 +
4426 +/* Maximum number of buffers that can be acquired/released through a single
4427 + * QBMan command
4428 + */
4429 +#define DPAA2_ETH_BUFS_PER_CMD 7
4430 +
4431 +/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
4432 + * frames in the Rx queues (length of the current frame is not
4433 + * taken into account when making the taildrop decision)
4434 + */
4435 +#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
4436 +
4437 +/* Buffer quota per queue. Must be large enough such that for minimum sized
4438 + * frames taildrop kicks in before the bpool gets depleted, so we compute
4439 + * how many 64B frames fit inside the taildrop threshold and add a margin
4440 + * to accommodate the buffer refill delay.
4441 + */
4442 +#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
4443 +#define DPAA2_ETH_NUM_BUFS_TD (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
4444 +#define DPAA2_ETH_REFILL_THRESH_TD \
4445 + (DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD)
4446 +
4447 +/* Buffer quota per queue to use when flow control is active. */
4448 +#define DPAA2_ETH_NUM_BUFS_FC 256
4449 +
4450 +/* Hardware requires alignment for ingress/egress buffer addresses
4451 + * and ingress buffer lengths.
4452 + */
4453 +#define DPAA2_ETH_RX_BUF_SIZE 2048
4454 +#define DPAA2_ETH_TX_BUF_ALIGN 64
4455 +#define DPAA2_ETH_RX_BUF_ALIGN 64
4456 +#define DPAA2_ETH_RX_BUF_ALIGN_V1 256
4457 +#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
4458 + ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
4459 +
4460 +/* rx_extra_head prevents reallocations in L3 processing. */
4461 +#define DPAA2_ETH_SKB_SIZE \
4462 + (DPAA2_ETH_RX_BUF_SIZE + \
4463 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
4464 +
4465 +/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
4466 + * buffers large enough to allow building an skb around them and also account
4467 + * for alignment restrictions.
4468 + */
4469 +#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \
4470 + (DPAA2_ETH_SKB_SIZE + \
4471 + (p_priv)->rx_buf_align)
4472 +
4473 +/* PTP nominal frequency 1GHz */
4474 +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
4475 +
4476 +/* Leave enough extra space in the headroom to make sure the skb is
4477 + * not realloc'd in forwarding scenarios.
4478 + */
4479 +#define DPAA2_ETH_RX_HEAD_ROOM 192
4480 +
4481 +/* We are accommodating a skb backpointer and some S/G info
4482 + * in the frame's software annotation. The hardware
4483 + * options are either 0 or 64, so we choose the latter.
4484 + */
4485 +#define DPAA2_ETH_SWA_SIZE 64
4486 +
4487 +/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
4488 +struct dpaa2_eth_swa {
4489 + struct sk_buff *skb;
4490 + struct scatterlist *scl;
4491 + int num_sg;
4492 + int num_dma_bufs;
4493 +};
4494 +
4495 +/* Annotation valid bits in FD FRC */
4496 +#define DPAA2_FD_FRC_FASV 0x8000
4497 +#define DPAA2_FD_FRC_FAEADV 0x4000
4498 +#define DPAA2_FD_FRC_FAPRV 0x2000
4499 +#define DPAA2_FD_FRC_FAIADV 0x1000
4500 +#define DPAA2_FD_FRC_FASWOV 0x0800
4501 +#define DPAA2_FD_FRC_FAICFDV 0x0400
4502 +
4503 +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
4504 +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
4505 + FD_CTRL_SBE | \
4506 + FD_CTRL_FSE | \
4507 + FD_CTRL_FAERR)
4508 +
4509 +/* Annotation bits in FD CTRL */
4510 +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
4511 +
4512 +/* Size of hardware annotation area based on the current buffer layout
4513 + * configuration
4514 + */
4515 +#define DPAA2_ETH_RX_HWA_SIZE 64
4516 +#define DPAA2_ETH_TX_HWA_SIZE 128
4517 +
4518 +/* Frame annotation status */
4519 +struct dpaa2_fas {
4520 + u8 reserved;
4521 + u8 ppid;
4522 + __le16 ifpid;
4523 + __le32 status;
4524 +} __packed;
4525 +
4526 +/* Frame annotation status word is located in the first 8 bytes
4527 + * of the buffer's hardware annotation area
4528 + */
4529 +#define DPAA2_FAS_OFFSET 0
4530 +#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
4531 +
4532 +/* Timestamp is located in the next 8 bytes of the buffer's
4533 + * hardware annotation area
4534 + */
4535 +#define DPAA2_TS_OFFSET 0x8
4536 +
4537 +/* Frame annotation egress action descriptor */
4538 +#define DPAA2_FAEAD_OFFSET 0x58
4539 +
4540 +struct dpaa2_faead {
4541 + __le32 conf_fqid;
4542 + __le32 ctrl;
4543 +};
4544 +
4545 +#define DPAA2_FAEAD_A2V 0x20000000
4546 +#define DPAA2_FAEAD_UPDV 0x00001000
4547 +#define DPAA2_FAEAD_UPD 0x00000010
4548 +
4549 +/* accessors for the hardware annotation fields that we use */
4550 +#define dpaa2_eth_get_hwa(buf_addr) \
4551 + ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
4552 +
4553 +#define dpaa2_eth_get_fas(buf_addr) \
4554 + (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
4555 +
4556 +#define dpaa2_eth_get_ts(buf_addr) \
4557 + (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET)
4558 +
4559 +#define dpaa2_eth_get_faead(buf_addr) \
4560 + (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET)
4561 +
4562 +/* Error and status bits in the frame annotation status word */
4563 +/* Debug frame, otherwise supposed to be discarded */
4564 +#define DPAA2_FAS_DISC 0x80000000
4565 +/* MACSEC frame */
4566 +#define DPAA2_FAS_MS 0x40000000
4567 +#define DPAA2_FAS_PTP 0x08000000
4568 +/* Ethernet multicast frame */
4569 +#define DPAA2_FAS_MC 0x04000000
4570 +/* Ethernet broadcast frame */
4571 +#define DPAA2_FAS_BC 0x02000000
4572 +#define DPAA2_FAS_KSE 0x00040000
4573 +#define DPAA2_FAS_EOFHE 0x00020000
4574 +#define DPAA2_FAS_MNLE 0x00010000
4575 +#define DPAA2_FAS_TIDE 0x00008000
4576 +#define DPAA2_FAS_PIEE 0x00004000
4577 +/* Frame length error */
4578 +#define DPAA2_FAS_FLE 0x00002000
4579 +/* Frame physical error */
4580 +#define DPAA2_FAS_FPE 0x00001000
4581 +#define DPAA2_FAS_PTE 0x00000080
4582 +#define DPAA2_FAS_ISP 0x00000040
4583 +#define DPAA2_FAS_PHE 0x00000020
4584 +#define DPAA2_FAS_BLE 0x00000010
4585 +/* L3 csum validation performed */
4586 +#define DPAA2_FAS_L3CV 0x00000008
4587 +/* L3 csum error */
4588 +#define DPAA2_FAS_L3CE 0x00000004
4589 +/* L4 csum validation performed */
4590 +#define DPAA2_FAS_L4CV 0x00000002
4591 +/* L4 csum error */
4592 +#define DPAA2_FAS_L4CE 0x00000001
4593 +/* Possible errors on the ingress path */
4594 +#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \
4595 + (DPAA2_FAS_EOFHE) | \
4596 + (DPAA2_FAS_MNLE) | \
4597 + (DPAA2_FAS_TIDE) | \
4598 + (DPAA2_FAS_PIEE) | \
4599 + (DPAA2_FAS_FLE) | \
4600 + (DPAA2_FAS_FPE) | \
4601 + (DPAA2_FAS_PTE) | \
4602 + (DPAA2_FAS_ISP) | \
4603 + (DPAA2_FAS_PHE) | \
4604 + (DPAA2_FAS_BLE) | \
4605 + (DPAA2_FAS_L3CE) | \
4606 + (DPAA2_FAS_L4CE))
4607 +/* Tx errors */
4608 +#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \
4609 + (DPAA2_FAS_EOFHE) | \
4610 + (DPAA2_FAS_MNLE) | \
4611 + (DPAA2_FAS_TIDE))
4612 +
4613 +/* Time in milliseconds between link state updates */
4614 +#define DPAA2_ETH_LINK_STATE_REFRESH 1000
4615 +
4616 +/* Number of times to retry a frame enqueue before giving up.
4617 + * Value determined empirically, in order to minimize the number
4618 + * of frames dropped on Tx
4619 + */
4620 +#define DPAA2_ETH_ENQUEUE_RETRIES 10
4621 +
4622 +/* Tx congestion entry & exit thresholds, in number of bytes.
4623 + * We allow a maximum of 512KB worth of frames pending processing on the Tx
4624 + * queues of an interface
4625 + */
4626 +#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
4627 +#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10)
4628 +
4629 +/* Driver statistics, other than those in struct rtnl_link_stats64.
4630 + * These are usually collected per-CPU and aggregated by ethtool.
4631 + */
4632 +struct dpaa2_eth_drv_stats {
4633 + __u64 tx_conf_frames;
4634 + __u64 tx_conf_bytes;
4635 + __u64 tx_sg_frames;
4636 + __u64 tx_sg_bytes;
4637 + __u64 rx_sg_frames;
4638 + __u64 rx_sg_bytes;
4639 + /* Enqueues retried due to portal busy */
4640 + __u64 tx_portal_busy;
4641 +};
4642 +
4643 +/* Per-FQ statistics */
4644 +struct dpaa2_eth_fq_stats {
4645 + /* Number of frames received on this queue */
4646 + __u64 frames;
4647 + /* Number of times this queue entered congestion */
4648 + __u64 congestion_entry;
4649 +};
4650 +
4651 +/* Per-channel statistics */
4652 +struct dpaa2_eth_ch_stats {
4653 + /* Volatile dequeues retried due to portal busy */
4654 + __u64 dequeue_portal_busy;
4655 + /* Number of CDANs; useful to estimate avg NAPI len */
4656 + __u64 cdan;
4657 + /* Number of frames received on queues from this channel */
4658 + __u64 frames;
4659 + /* Pull errors */
4660 + __u64 pull_err;
4661 +};
4662 +
4663 +/* Maximum number of queues associated with a DPNI */
4664 +#define DPAA2_ETH_MAX_RX_QUEUES 16
4665 +#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
4666 +#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
4667 +#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
4668 + DPAA2_ETH_MAX_TX_QUEUES + \
4669 + DPAA2_ETH_MAX_RX_ERR_QUEUES)
4670 +
4671 +#define DPAA2_ETH_MAX_DPCONS NR_CPUS
4672 +
4673 +enum dpaa2_eth_fq_type {
4674 + DPAA2_RX_FQ = 0,
4675 + DPAA2_TX_CONF_FQ,
4676 + DPAA2_RX_ERR_FQ
4677 +};
4678 +
4679 +struct dpaa2_eth_priv;
4680 +
4681 +struct dpaa2_eth_fq {
4682 + u32 fqid;
4683 + u32 tx_qdbin;
4684 + u16 flowid;
4685 + int target_cpu;
4686 + struct dpaa2_eth_channel *channel;
4687 + enum dpaa2_eth_fq_type type;
4688 +
4689 + void (*consume)(struct dpaa2_eth_priv *,
4690 + struct dpaa2_eth_channel *,
4691 + const struct dpaa2_fd *,
4692 + struct napi_struct *,
4693 + u16 queue_id);
4694 + struct dpaa2_eth_fq_stats stats;
4695 +};
4696 +
4697 +struct dpaa2_eth_channel {
4698 + struct dpaa2_io_notification_ctx nctx;
4699 + struct fsl_mc_device *dpcon;
4700 + int dpcon_id;
4701 + int ch_id;
4702 + int dpio_id;
4703 + struct napi_struct napi;
4704 + struct dpaa2_io_store *store;
4705 + struct dpaa2_eth_priv *priv;
4706 + int buf_count;
4707 + struct dpaa2_eth_ch_stats stats;
4708 +};
4709 +
4710 +struct dpaa2_eth_cls_rule {
4711 + struct ethtool_rx_flow_spec fs;
4712 + bool in_use;
4713 +};
4714 +
4715 +struct dpaa2_eth_hash_fields {
4716 + u64 rxnfc_field;
4717 + enum net_prot cls_prot;
4718 + int cls_field;
4719 + int offset;
4720 + int size;
4721 +};
4722 +
4723 +/* Driver private data */
4724 +struct dpaa2_eth_priv {
4725 + struct net_device *net_dev;
4726 +
4727 + /* Standard statistics */
4728 + struct rtnl_link_stats64 __percpu *percpu_stats;
4729 + /* Extra stats, in addition to the ones known by the kernel */
4730 + struct dpaa2_eth_drv_stats __percpu *percpu_extras;
4731 + struct iommu_domain *iommu_domain;
4732 +
4733 + bool ts_tx_en; /* Tx timestamping enabled */
4734 + bool ts_rx_en; /* Rx timestamping enabled */
4735 +
4736 + u16 tx_data_offset;
4737 + u16 rx_buf_align;
4738 +
4739 + u16 bpid;
4740 + u16 tx_qdid;
4741 +
4742 + int tx_pause_frames;
4743 + int num_bufs;
4744 + int refill_thresh;
4745 +
4746 + /* Tx congestion notifications are written here */
4747 + void *cscn_mem;
4748 + void *cscn_unaligned;
4749 + dma_addr_t cscn_dma;
4750 +
4751 + u8 num_fqs;
4752 + /* Tx queues are at the beginning of the array */
4753 + struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
4754 +
4755 + u8 num_channels;
4756 + struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
4757 +
4758 + int dpni_id;
4759 + struct dpni_attr dpni_attrs;
4760 + struct fsl_mc_device *dpbp_dev;
4761 +
4762 + struct fsl_mc_io *mc_io;
4763 + /* SysFS-controlled affinity mask for TxConf FQs */
4764 + struct cpumask txconf_cpumask;
4765 + /* Cores which have an affine DPIO/DPCON.
4766 + * This is the cpu set on which Rx frames are processed;
4767 + * Tx confirmation frames are processed on a subset of this,
4768 + * depending on user settings.
4769 + */
4770 + struct cpumask dpio_cpumask;
4771 +
4772 + u16 mc_token;
4773 +
4774 + struct dpni_link_state link_state;
4775 + bool do_link_poll;
4776 + struct task_struct *poll_thread;
4777 +
4778 + struct dpaa2_eth_hash_fields *hash_fields;
4779 + u8 num_hash_fields;
4780 + /* enabled ethtool hashing bits */
4781 + u64 rx_flow_hash;
4782 +
4783 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4784 + struct dpaa2_debugfs dbg;
4785 +#endif
4786 +
4787 + /* array of classification rules */
4788 + struct dpaa2_eth_cls_rule *cls_rule;
4789 +
4790 + struct dpni_tx_shaping_cfg shaping_cfg;
4791 +};
4792 +
4793 +#define dpaa2_eth_hash_enabled(priv) \
4794 + ((priv)->dpni_attrs.num_queues > 1)
4795 +
4796 +#define dpaa2_eth_fs_enabled(priv) \
4797 + (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
4798 +
4799 +#define dpaa2_eth_fs_mask_enabled(priv) \
4800 + ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
4801 +
4802 +#define dpaa2_eth_fs_count(priv) \
4803 + ((priv)->dpni_attrs.fs_entries)
4804 +
4805 +/* size of DMA memory used to pass configuration to classifier, in bytes */
4806 +#define DPAA2_CLASSIFIER_DMA_SIZE 256
4807 +
4808 +extern const struct ethtool_ops dpaa2_ethtool_ops;
4809 +extern const char dpaa2_eth_drv_version[];
4810 +
4811 +static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
4812 +{
4813 + return priv->dpni_attrs.num_queues;
4814 +}
4815 +
4816 +void check_cls_support(struct dpaa2_eth_priv *priv);
4817 +
4818 +int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, bool enable);
4819 +#endif	/* __DPAA2_ETH_H */
4820 --- /dev/null
4821 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
4822 @@ -0,0 +1,856 @@
4823 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
4824 + *
4825 + * Redistribution and use in source and binary forms, with or without
4826 + * modification, are permitted provided that the following conditions are met:
4827 + * * Redistributions of source code must retain the above copyright
4828 + * notice, this list of conditions and the following disclaimer.
4829 + * * Redistributions in binary form must reproduce the above copyright
4830 + * notice, this list of conditions and the following disclaimer in the
4831 + * documentation and/or other materials provided with the distribution.
4832 + * * Neither the name of Freescale Semiconductor nor the
4833 + * names of its contributors may be used to endorse or promote products
4834 + * derived from this software without specific prior written permission.
4835 + *
4836 + *
4837 + * ALTERNATIVELY, this software may be distributed under the terms of the
4838 + * GNU General Public License ("GPL") as published by the Free Software
4839 + * Foundation, either version 2 of that License or (at your option) any
4840 + * later version.
4841 + *
4842 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
4843 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
4844 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
4845 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
4846 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
4847 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
4848 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4849 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4850 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4851 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4852 + */
4853 +
4854 +#include "dpni.h" /* DPNI_LINK_OPT_* */
4855 +#include "dpaa2-eth.h"
4856 +
4857 +/* To be kept in sync with dpni_statistics */
4858 +static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
4859 + "rx frames",
4860 + "rx bytes",
4861 + "rx mcast frames",
4862 + "rx mcast bytes",
4863 + "rx bcast frames",
4864 + "rx bcast bytes",
4865 + "tx frames",
4866 + "tx bytes",
4867 + "tx mcast frames",
4868 + "tx mcast bytes",
4869 + "tx bcast frames",
4870 + "tx bcast bytes",
4871 + "rx filtered frames",
4872 + "rx discarded frames",
4873 + "rx nobuffer discards",
4874 + "tx discarded frames",
4875 + "tx confirmed frames",
4876 +};
4877 +
4878 +#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
4879 +
4880 +/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */
4881 +static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
4882 + /* per-cpu stats */
4883 +
4884 + "tx conf frames",
4885 + "tx conf bytes",
4886 + "tx sg frames",
4887 + "tx sg bytes",
4888 + "rx sg frames",
4889 + "rx sg bytes",
4890 + /* how many times we had to retry the enqueue command */
4891 + "enqueue portal busy",
4892 +
4893 + /* Channel stats */
4894 + /* How many times we had to retry the volatile dequeue command */
4895 + "dequeue portal busy",
4896 + "channel pull errors",
4897 + /* Number of notifications received */
4898 + "cdan",
4899 + "tx congestion state",
4900 +#ifdef CONFIG_FSL_QBMAN_DEBUG
4901 + /* FQ stats */
4902 + "rx pending frames",
4903 + "rx pending bytes",
4904 + "tx conf pending frames",
4905 + "tx conf pending bytes",
4906 + "buffer count"
4907 +#endif
4908 +};
4909 +
4910 +#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
4911 +
4912 +static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
4913 + struct ethtool_drvinfo *drvinfo)
4914 +{
4915 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
4916 + strlcpy(drvinfo->version, dpaa2_eth_drv_version,
4917 + sizeof(drvinfo->version));
4918 + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
4919 + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
4920 + sizeof(drvinfo->bus_info));
4921 +}
4922 +
4923 +static int dpaa2_eth_get_settings(struct net_device *net_dev,
4924 + struct ethtool_cmd *cmd)
4925 +{
4926 + struct dpni_link_state state = {0};
4927 + int err = 0;
4928 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4929 +
4930 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4931 + if (err) {
4932 + netdev_err(net_dev, "ERROR %d getting link state", err);
4933 + goto out;
4934 + }
4935 +
4936 + /* At the moment, we have no way of interrogating the DPMAC
4937 + * from the DPNI side - and for that matter there may exist
4938 + * no DPMAC at all. So for now we just don't report anything
4939 + * beyond the DPNI attributes.
4940 + */
4941 + if (state.options & DPNI_LINK_OPT_AUTONEG)
4942 + cmd->autoneg = AUTONEG_ENABLE;
4943 + if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
4944 + cmd->duplex = DUPLEX_FULL;
4945 + ethtool_cmd_speed_set(cmd, state.rate);
4946 +
4947 +out:
4948 + return err;
4949 +}
4950 +
4951 +static int dpaa2_eth_set_settings(struct net_device *net_dev,
4952 + struct ethtool_cmd *cmd)
4953 +{
4954 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4955 + struct dpni_link_state state = {0};
4956 + struct dpni_link_cfg cfg = {0};
4957 + int err = 0;
4958 +
4959 + netdev_dbg(net_dev, "Setting link parameters...");
4960 +
4961 + /* Need to interrogate on link state to get flow control params */
4962 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4963 + if (err) {
4964 + netdev_err(net_dev, "ERROR %d getting link state", err);
4965 + goto out;
4966 + }
4967 +
4968 + cfg.options = state.options;
4969 + cfg.rate = ethtool_cmd_speed(cmd);
4970 + if (cmd->autoneg == AUTONEG_ENABLE)
4971 + cfg.options |= DPNI_LINK_OPT_AUTONEG;
4972 + else
4973 + cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
4974 + if (cmd->duplex == DUPLEX_HALF)
4975 + cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
4976 + else
4977 + cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
4978 +
4979 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
4980 + if (err)
4981 + /* ethtool will be loud enough if we return an error; no point
4982 + * in putting our own error message on the console by default
4983 + */
4984 + netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
4985 +
4986 +out:
4987 + return err;
4988 +}
4989 +
4990 +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
4991 + struct ethtool_pauseparam *pause)
4992 +{
4993 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4994 + struct dpni_link_state state = {0};
4995 + int err;
4996 +
4997 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4998 + if (err)
4999 + netdev_dbg(net_dev, "ERROR %d getting link state", err);
5000 +
5001 +	/* For now, pause frame autonegotiation cannot be controlled separately */
5002 + pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
5003 + pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
5004 + pause->tx_pause = pause->rx_pause ^
5005 + !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
5006 +}
5007 +
5008 +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
5009 + struct ethtool_pauseparam *pause)
5010 +{
5011 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5012 + struct dpni_link_state state = {0};
5013 + struct dpni_link_cfg cfg = {0};
5014 + u32 current_tx_pause;
5015 + int err = 0;
5016 +
5017 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5018 + if (err) {
5019 + netdev_dbg(net_dev, "ERROR %d getting link state", err);
5020 + goto out;
5021 + }
5022 +
5023 + cfg.rate = state.rate;
5024 + cfg.options = state.options;
5025 + current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
5026 + !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
5027 +
5028 + if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
5029 + netdev_warn(net_dev,
5030 + "WARN: Can't change pause frames autoneg separately\n");
5031 +
5032 + if (pause->rx_pause)
5033 + cfg.options |= DPNI_LINK_OPT_PAUSE;
5034 + else
5035 + cfg.options &= ~DPNI_LINK_OPT_PAUSE;
5036 +
5037 + if (pause->rx_pause ^ pause->tx_pause)
5038 + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
5039 + else
5040 + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
5041 +
5042 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
5043 + if (err) {
5044 + /* ethtool will be loud enough if we return an error; no point
5045 + * in putting our own error message on the console by default
5046 + */
5047 + netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
5048 + goto out;
5049 + }
5050 +
5051 + /* Enable / disable taildrops if Tx pause frames have changed */
5052 + if (current_tx_pause == pause->tx_pause)
5053 + goto out;
5054 +
5055 + err = setup_fqs_taildrop(priv, !pause->tx_pause);
5056 + if (err)
5057 + netdev_dbg(net_dev, "ERROR %d configuring taildrop", err);
5058 +
5059 + priv->tx_pause_frames = pause->tx_pause;
5060 +out:
5061 + return err;
5062 +}
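/*
 * Editor's sketch (not part of the patch): how the DPNI_LINK_OPT_PAUSE and
 * DPNI_LINK_OPT_ASYM_PAUSE bits used above decode into the ethtool rx/tx
 * pause pair, assuming the same flag semantics as the two handlers:
 *
 *   PAUSE  ASYM_PAUSE | rx_pause  tx_pause
 *     0        0      |    0         0
 *     1        0      |    1         1     (symmetric pause)
 *     1        1      |    1         0     (Rx pause only)
 *     0        1      |    0         1     (Tx pause only)
 */
static inline void example_decode_pause(u64 options, bool *rx, bool *tx)
{
	*rx = !!(options & DPNI_LINK_OPT_PAUSE);
	*tx = *rx ^ !!(options & DPNI_LINK_OPT_ASYM_PAUSE);
}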
5063 +
5064 +static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
5065 + u8 *data)
5066 +{
5067 + u8 *p = data;
5068 + int i;
5069 +
5070 + switch (stringset) {
5071 + case ETH_SS_STATS:
5072 + for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
5073 + strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
5074 + p += ETH_GSTRING_LEN;
5075 + }
5076 + for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
5077 + strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
5078 + p += ETH_GSTRING_LEN;
5079 + }
5080 + break;
5081 + }
5082 +}
5083 +
5084 +static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
5085 +{
5086 + switch (sset) {
5087 + case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
5088 + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
5089 + default:
5090 + return -EOPNOTSUPP;
5091 + }
5092 +}
5093 +
5094 +/** Fill in hardware counters, as returned by MC.
5095 + */
5096 +static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
5097 + struct ethtool_stats *stats,
5098 + u64 *data)
5099 +{
5100 + int i = 0; /* Current index in the data array */
5101 + int j = 0, k, err;
5102 + union dpni_statistics dpni_stats;
5103 +
5104 +#ifdef CONFIG_FSL_QBMAN_DEBUG
5105 + u32 fcnt, bcnt;
5106 + u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
5107 + u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
5108 + u32 buf_cnt;
5109 +#endif
5110 + u64 cdan = 0;
5111 + u64 portal_busy = 0, pull_err = 0;
5112 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5113 + struct dpaa2_eth_drv_stats *extras;
5114 + struct dpaa2_eth_ch_stats *ch_stats;
5115 +
5116 + memset(data, 0,
5117 + sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
5118 +
5119 +	/* Fill in standard counters, from DPNI statistics */
5120 + for (j = 0; j <= 2; j++) {
5121 + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
5122 + j, &dpni_stats);
5123 + if (err != 0)
5124 + netdev_warn(net_dev, "Err %d getting DPNI stats page %d",
5125 + err, j);
5126 +
5127 + switch (j) {
5128 + case 0:
5129 + *(data + i++) = dpni_stats.page_0.ingress_all_frames;
5130 + *(data + i++) = dpni_stats.page_0.ingress_all_bytes;
5131 + *(data + i++) = dpni_stats.page_0.ingress_multicast_frames;
5132 + *(data + i++) = dpni_stats.page_0.ingress_multicast_bytes;
5133 + *(data + i++) = dpni_stats.page_0.ingress_broadcast_frames;
5134 + *(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes;
5135 + break;
5136 + case 1:
5137 + *(data + i++) = dpni_stats.page_1.egress_all_frames;
5138 + *(data + i++) = dpni_stats.page_1.egress_all_bytes;
5139 + *(data + i++) = dpni_stats.page_1.egress_multicast_frames;
5140 + *(data + i++) = dpni_stats.page_1.egress_multicast_bytes;
5141 + *(data + i++) = dpni_stats.page_1.egress_broadcast_frames;
5142 + *(data + i++) = dpni_stats.page_1.egress_broadcast_bytes;
5143 + break;
5144 + case 2:
5145 + *(data + i++) = dpni_stats.page_2.ingress_filtered_frames;
5146 + *(data + i++) = dpni_stats.page_2.ingress_discarded_frames;
5147 + *(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards;
5148 + *(data + i++) = dpni_stats.page_2.egress_discarded_frames;
5149 + *(data + i++) = dpni_stats.page_2.egress_confirmed_frames;
5150 + break;
5151 + default:
5152 + break;
5153 + }
5154 + }
5155 +
5156 +	/* Fill in per-CPU extra stats */
5157 + for_each_online_cpu(k) {
5158 + extras = per_cpu_ptr(priv->percpu_extras, k);
5159 + for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
5160 + *((__u64 *)data + i + j) += *((__u64 *)extras + j);
5161 + }
5162 +
5163 + i += j;
5164 +
5165 + /* We may be using fewer DPIOs than actual CPUs */
5166 + for (j = 0; j < priv->num_channels; j++) {
5167 + ch_stats = &priv->channel[j]->stats;
5168 + cdan += ch_stats->cdan;
5169 + portal_busy += ch_stats->dequeue_portal_busy;
5170 + pull_err += ch_stats->pull_err;
5171 + }
5172 +
5173 + *(data + i++) = portal_busy;
5174 + *(data + i++) = pull_err;
5175 + *(data + i++) = cdan;
5176 +
5177 + *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
5178 +
5179 +#ifdef CONFIG_FSL_QBMAN_DEBUG
5180 + for (j = 0; j < priv->num_fqs; j++) {
5181 +		/* Read FQ instantaneous counts */
5182 + err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
5183 + &fcnt, &bcnt);
5184 + if (err) {
5185 + netdev_warn(net_dev, "FQ query error %d", err);
5186 + return;
5187 + }
5188 +
5189 + if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
5190 + fcnt_tx_total += fcnt;
5191 + bcnt_tx_total += bcnt;
5192 + } else {
5193 + fcnt_rx_total += fcnt;
5194 + bcnt_rx_total += bcnt;
5195 + }
5196 + }
5197 +
5198 + *(data + i++) = fcnt_rx_total;
5199 + *(data + i++) = bcnt_rx_total;
5200 + *(data + i++) = fcnt_tx_total;
5201 + *(data + i++) = bcnt_tx_total;
5202 +
5203 + err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
5204 + if (err) {
5205 + netdev_warn(net_dev, "Buffer count query error %d\n", err);
5206 + return;
5207 + }
5208 + *(data + i++) = buf_cnt;
5209 +#endif
5210 +}
5211 +
5212 +static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
5213 +{
5214 + int i, off = 0;
5215 +
5216 + for (i = 0; i < priv->num_hash_fields; i++) {
5217 + if (priv->hash_fields[i].cls_prot == prot &&
5218 + priv->hash_fields[i].cls_field == field)
5219 + return off;
5220 + off += priv->hash_fields[i].size;
5221 + }
5222 +
5223 + return -1;
5224 +}
5225 +
5226 +static u8 cls_key_size(struct dpaa2_eth_priv *priv)
5227 +{
5228 + u8 i, size = 0;
5229 +
5230 + for (i = 0; i < priv->num_hash_fields; i++)
5231 + size += priv->hash_fields[i].size;
5232 +
5233 + return size;
5234 +}
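/*
 * Editor's sketch (not part of the patch): with a hypothetical
 * priv->hash_fields list of { ETH_TYPE (2 bytes), IP_PROTO (1), IP_SRC (4),
 * IP_DST (4) }, the fields are packed back to back in list order, so
 * cls_key_size() would return 2 + 1 + 4 + 4 = 11 and
 * cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC) would return 2 + 1 = 3.
 */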
5235 +
5236 +void check_cls_support(struct dpaa2_eth_priv *priv)
5237 +{
5238 + u8 key_size = cls_key_size(priv);
5239 + struct device *dev = priv->net_dev->dev.parent;
5240 +
5241 + if (dpaa2_eth_hash_enabled(priv)) {
5242 + if (priv->dpni_attrs.fs_key_size < key_size) {
5243 + dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
5244 + priv->dpni_attrs.fs_key_size,
5245 + key_size);
5246 + goto disable_fs;
5247 + }
5248 + if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
5249 + dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
5250 + DPKG_MAX_NUM_OF_EXTRACTS);
5251 + goto disable_fs;
5252 + }
5253 + }
5254 +
5255 + if (dpaa2_eth_fs_enabled(priv)) {
5256 + if (!dpaa2_eth_hash_enabled(priv)) {
5257 + dev_info(dev, "Insufficient queues. Steering is disabled\n");
5258 + goto disable_fs;
5259 + }
5260 +
5261 + if (!dpaa2_eth_fs_mask_enabled(priv)) {
5262 + dev_info(dev, "Key masks not supported. Steering is disabled\n");
5263 + goto disable_fs;
5264 + }
5265 + }
5266 +
5267 + return;
5268 +
5269 +disable_fs:
5270 + priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
5271 + priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
5272 +}
5273 +
5274 +static int prep_l4_rule(struct dpaa2_eth_priv *priv,
5275 + struct ethtool_tcpip4_spec *l4_value,
5276 + struct ethtool_tcpip4_spec *l4_mask,
5277 + void *key, void *mask, u8 l4_proto)
5278 +{
5279 + int offset;
5280 +
5281 + if (l4_mask->tos) {
5282 + netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
5283 + return -EOPNOTSUPP;
5284 + }
5285 +
5286 + if (l4_mask->ip4src) {
5287 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
5288 + *(u32 *)(key + offset) = l4_value->ip4src;
5289 + *(u32 *)(mask + offset) = l4_mask->ip4src;
5290 + }
5291 +
5292 + if (l4_mask->ip4dst) {
5293 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
5294 + *(u32 *)(key + offset) = l4_value->ip4dst;
5295 + *(u32 *)(mask + offset) = l4_mask->ip4dst;
5296 + }
5297 +
5298 + if (l4_mask->psrc) {
5299 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
5300 + *(u32 *)(key + offset) = l4_value->psrc;
5301 + *(u32 *)(mask + offset) = l4_mask->psrc;
5302 + }
5303 +
5304 + if (l4_mask->pdst) {
5305 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
5306 + *(u32 *)(key + offset) = l4_value->pdst;
5307 + *(u32 *)(mask + offset) = l4_mask->pdst;
5308 + }
5309 +
5310 + /* Only apply the rule for the user-specified L4 protocol
5311 + * and if ethertype matches IPv4
5312 + */
5313 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
5314 + *(u16 *)(key + offset) = htons(ETH_P_IP);
5315 + *(u16 *)(mask + offset) = 0xFFFF;
5316 +
5317 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
5318 + *(u8 *)(key + offset) = l4_proto;
5319 + *(u8 *)(mask + offset) = 0xFF;
5320 +
5321 + /* TODO: check IP version */
5322 +
5323 + return 0;
5324 +}
5325 +
5326 +static int prep_eth_rule(struct dpaa2_eth_priv *priv,
5327 + struct ethhdr *eth_value, struct ethhdr *eth_mask,
5328 + void *key, void *mask)
5329 +{
5330 + int offset;
5331 +
5332 + if (eth_mask->h_proto) {
5333 + netdev_err(priv->net_dev, "Ethertype is not supported!\n");
5334 + return -EOPNOTSUPP;
5335 + }
5336 +
5337 + if (!is_zero_ether_addr(eth_mask->h_source)) {
5338 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
5339 + ether_addr_copy(key + offset, eth_value->h_source);
5340 + ether_addr_copy(mask + offset, eth_mask->h_source);
5341 + }
5342 +
5343 + if (!is_zero_ether_addr(eth_mask->h_dest)) {
5344 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
5345 + ether_addr_copy(key + offset, eth_value->h_dest);
5346 + ether_addr_copy(mask + offset, eth_mask->h_dest);
5347 + }
5348 +
5349 + return 0;
5350 +}
5351 +
5352 +static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
5353 + struct ethtool_usrip4_spec *uip_value,
5354 + struct ethtool_usrip4_spec *uip_mask,
5355 + void *key, void *mask)
5356 +{
5357 + int offset;
5358 +
5359 + if (uip_mask->tos)
5360 + return -EOPNOTSUPP;
5361 +
5362 + if (uip_mask->ip4src) {
5363 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
5364 + *(u32 *)(key + offset) = uip_value->ip4src;
5365 + *(u32 *)(mask + offset) = uip_mask->ip4src;
5366 + }
5367 +
5368 + if (uip_mask->ip4dst) {
5369 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
5370 + *(u32 *)(key + offset) = uip_value->ip4dst;
5371 + *(u32 *)(mask + offset) = uip_mask->ip4dst;
5372 + }
5373 +
5374 + if (uip_mask->proto) {
5375 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
5376 + *(u32 *)(key + offset) = uip_value->proto;
5377 + *(u32 *)(mask + offset) = uip_mask->proto;
5378 + }
5379 + if (uip_mask->l4_4_bytes) {
5380 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
5381 + *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
5382 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
5383 +
5384 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
5385 + *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
5386 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
5387 + }
5388 +
5389 + /* Ethertype must be IP */
5390 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
5391 + *(u16 *)(key + offset) = htons(ETH_P_IP);
5392 + *(u16 *)(mask + offset) = 0xFFFF;
5393 +
5394 + return 0;
5395 +}
5396 +
5397 +static int prep_ext_rule(struct dpaa2_eth_priv *priv,
5398 + struct ethtool_flow_ext *ext_value,
5399 + struct ethtool_flow_ext *ext_mask,
5400 + void *key, void *mask)
5401 +{
5402 + int offset;
5403 +
5404 + if (ext_mask->vlan_etype)
5405 + return -EOPNOTSUPP;
5406 +
5407 + if (ext_mask->vlan_tci) {
5408 + offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
5409 + *(u16 *)(key + offset) = ext_value->vlan_tci;
5410 + *(u16 *)(mask + offset) = ext_mask->vlan_tci;
5411 + }
5412 +
5413 + return 0;
5414 +}
5415 +
5416 +static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
5417 + struct ethtool_flow_ext *ext_value,
5418 + struct ethtool_flow_ext *ext_mask,
5419 + void *key, void *mask)
5420 +{
5421 + int offset;
5422 +
5423 + if (!is_zero_ether_addr(ext_mask->h_dest)) {
5424 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
5425 + ether_addr_copy(key + offset, ext_value->h_dest);
5426 + ether_addr_copy(mask + offset, ext_mask->h_dest);
5427 + }
5428 +
5429 + return 0;
5430 +}
5431 +
5432 +static int prep_cls_rule(struct net_device *net_dev,
5433 + struct ethtool_rx_flow_spec *fs,
5434 + void *key)
5435 +{
5436 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5437 + const u8 key_size = cls_key_size(priv);
5438 + void *msk = key + key_size;
5439 + int err;
5440 +
5441 + memset(key, 0, key_size * 2);
5442 +
5443 + switch (fs->flow_type & 0xff) {
5444 + case TCP_V4_FLOW:
5445 + err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
5446 + &fs->m_u.tcp_ip4_spec, key, msk,
5447 + IPPROTO_TCP);
5448 + break;
5449 + case UDP_V4_FLOW:
5450 + err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
5451 + &fs->m_u.udp_ip4_spec, key, msk,
5452 + IPPROTO_UDP);
5453 + break;
5454 + case SCTP_V4_FLOW:
5455 + err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
5456 + &fs->m_u.sctp_ip4_spec, key, msk,
5457 + IPPROTO_SCTP);
5458 + break;
5459 + case ETHER_FLOW:
5460 + err = prep_eth_rule(priv, &fs->h_u.ether_spec,
5461 + &fs->m_u.ether_spec, key, msk);
5462 + break;
5463 + case IP_USER_FLOW:
5464 + err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
5465 + &fs->m_u.usr_ip4_spec, key, msk);
5466 + break;
5467 + default:
5468 + /* TODO: AH, ESP */
5469 + return -EOPNOTSUPP;
5470 + }
5471 + if (err)
5472 + return err;
5473 +
5474 + if (fs->flow_type & FLOW_EXT) {
5475 + err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
5476 + if (err)
5477 + return err;
5478 + }
5479 +
5480 + if (fs->flow_type & FLOW_MAC_EXT) {
5481 + err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
5482 + if (err)
5483 + return err;
5484 + }
5485 +
5486 + return 0;
5487 +}
5488 +
5489 +static int del_cls(struct net_device *net_dev, int location);
5490 +
5491 +static int do_cls(struct net_device *net_dev,
5492 + struct ethtool_rx_flow_spec *fs,
5493 + bool add)
5494 +{
5495 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5496 + struct device *dev = net_dev->dev.parent;
5497 + const int rule_cnt = dpaa2_eth_fs_count(priv);
5498 + struct dpni_rule_cfg rule_cfg;
5499 + struct dpni_fs_action_cfg fs_act = { 0 };
5500 + void *dma_mem;
5501 + int err = 0;
5502 +
5503 + if (!dpaa2_eth_fs_enabled(priv)) {
5504 + netdev_err(net_dev, "dev does not support steering!\n");
5505 + /* dev doesn't support steering */
5506 + return -EOPNOTSUPP;
5507 + }
5508 +
5509 + if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
5510 + fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
5511 + fs->location >= rule_cnt)
5512 + return -EINVAL;
5513 +
5514 +	/* When adding a new rule, check if the location is available
5515 +	 * and, if not, free the existing table entry before inserting
5516 +	 * the new one
5517 +	 */
5518 + if (add && (priv->cls_rule[fs->location].in_use == true))
5519 + del_cls(net_dev, fs->location);
5520 +
5521 + memset(&rule_cfg, 0, sizeof(rule_cfg));
5522 + rule_cfg.key_size = cls_key_size(priv);
5523 +
5524 + /* allocate twice the key size, for the actual key and for mask */
5525 + dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
5526 + if (!dma_mem)
5527 + return -ENOMEM;
5528 +
5529 + err = prep_cls_rule(net_dev, fs, dma_mem);
5530 + if (err)
5531 + goto err_free_mem;
5532 +
5533 + rule_cfg.key_iova = dma_map_single(dev, dma_mem,
5534 + rule_cfg.key_size * 2,
5535 + DMA_TO_DEVICE);
5536 +
5537 + rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
5538 +
5539 + if (fs->ring_cookie == RX_CLS_FLOW_DISC)
5540 + fs_act.options |= DPNI_FS_OPT_DISCARD;
5541 + else
5542 + fs_act.flow_id = fs->ring_cookie;
5543 +
5544 + if (add)
5545 + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
5546 + 0, fs->location, &rule_cfg, &fs_act);
5547 + else
5548 + err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token,
5549 + 0, &rule_cfg);
5550 +
5551 + dma_unmap_single(dev, rule_cfg.key_iova,
5552 + rule_cfg.key_size * 2, DMA_TO_DEVICE);
5553 +
5554 + if (err)
5555 + netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
5556 +
5557 +err_free_mem:
5558 + kfree(dma_mem);
5559 +
5560 + return err;
5561 +}
5562 +
5563 +static int add_cls(struct net_device *net_dev,
5564 + struct ethtool_rx_flow_spec *fs)
5565 +{
5566 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5567 + int err;
5568 +
5569 + err = do_cls(net_dev, fs, true);
5570 + if (err)
5571 + return err;
5572 +
5573 + priv->cls_rule[fs->location].in_use = true;
5574 + priv->cls_rule[fs->location].fs = *fs;
5575 +
5576 + return 0;
5577 +}
5578 +
5579 +static int del_cls(struct net_device *net_dev, int location)
5580 +{
5581 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5582 + int err;
5583 +
5584 + err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
5585 + if (err)
5586 + return err;
5587 +
5588 + priv->cls_rule[location].in_use = false;
5589 +
5590 + return 0;
5591 +}
5592 +
5593 +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
5594 + struct ethtool_rxnfc *rxnfc)
5595 +{
5596 + int err = 0;
5597 +
5598 + switch (rxnfc->cmd) {
5599 + case ETHTOOL_SRXCLSRLINS:
5600 + err = add_cls(net_dev, &rxnfc->fs);
5601 + break;
5602 +
5603 + case ETHTOOL_SRXCLSRLDEL:
5604 + err = del_cls(net_dev, rxnfc->fs.location);
5605 + break;
5606 +
5607 + default:
5608 + err = -EOPNOTSUPP;
5609 + }
5610 +
5611 + return err;
5612 +}
5613 +
5614 +static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
5615 + struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
5616 +{
5617 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5618 + const int rule_cnt = dpaa2_eth_fs_count(priv);
5619 + int i, j;
5620 +
5621 + switch (rxnfc->cmd) {
5622 + case ETHTOOL_GRXFH:
5623 + /* we purposely ignore cmd->flow_type, because the hashing key
5624 + * is the same (and fixed) for all protocols
5625 + */
5626 + rxnfc->data = priv->rx_flow_hash;
5627 + break;
5628 +
5629 + case ETHTOOL_GRXRINGS:
5630 + rxnfc->data = dpaa2_eth_queue_count(priv);
5631 + break;
5632 +
5633 + case ETHTOOL_GRXCLSRLCNT:
5634 + for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
5635 + if (priv->cls_rule[i].in_use)
5636 + rxnfc->rule_cnt++;
5637 + rxnfc->data = rule_cnt;
5638 + break;
5639 +
5640 + case ETHTOOL_GRXCLSRULE:
5641 + if (!priv->cls_rule[rxnfc->fs.location].in_use)
5642 + return -EINVAL;
5643 +
5644 + rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
5645 + break;
5646 +
5647 + case ETHTOOL_GRXCLSRLALL:
5648 + for (i = 0, j = 0; i < rule_cnt; i++) {
5649 + if (!priv->cls_rule[i].in_use)
5650 + continue;
5651 + if (j == rxnfc->rule_cnt)
5652 + return -EMSGSIZE;
5653 + rule_locs[j++] = i;
5654 + }
5655 + rxnfc->rule_cnt = j;
5656 + rxnfc->data = rule_cnt;
5657 + break;
5658 +
5659 + default:
5660 + return -EOPNOTSUPP;
5661 + }
5662 +
5663 + return 0;
5664 +}
5665 +
5666 +const struct ethtool_ops dpaa2_ethtool_ops = {
5667 + .get_drvinfo = dpaa2_eth_get_drvinfo,
5668 + .get_link = ethtool_op_get_link,
5669 + .get_settings = dpaa2_eth_get_settings,
5670 + .set_settings = dpaa2_eth_set_settings,
5671 + .get_pauseparam = dpaa2_eth_get_pauseparam,
5672 + .set_pauseparam = dpaa2_eth_set_pauseparam,
5673 + .get_sset_count = dpaa2_eth_get_sset_count,
5674 + .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
5675 + .get_strings = dpaa2_eth_get_strings,
5676 + .get_rxnfc = dpaa2_eth_get_rxnfc,
5677 + .set_rxnfc = dpaa2_eth_set_rxnfc,
5678 +};
5679 --- /dev/null
5680 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
5681 @@ -0,0 +1,176 @@
5682 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
5683 + *
5684 + * Redistribution and use in source and binary forms, with or without
5685 + * modification, are permitted provided that the following conditions are met:
5686 + * * Redistributions of source code must retain the above copyright
5687 + * notice, this list of conditions and the following disclaimer.
5688 + * * Redistributions in binary form must reproduce the above copyright
5689 + * notice, this list of conditions and the following disclaimer in the
5690 + * documentation and/or other materials provided with the distribution.
5691 + * * Neither the name of the above-listed copyright holders nor the
5692 + * names of any contributors may be used to endorse or promote products
5693 + * derived from this software without specific prior written permission.
5694 + *
5695 + *
5696 + * ALTERNATIVELY, this software may be distributed under the terms of the
5697 + * GNU General Public License ("GPL") as published by the Free Software
5698 + * Foundation, either version 2 of that License or (at your option) any
5699 + * later version.
5700 + *
5701 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
5702 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5703 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5704 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
5705 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
5706 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
5707 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
5708 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
5709 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
5710 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
5711 + * POSSIBILITY OF SUCH DAMAGE.
5712 + */
5713 +#ifndef __FSL_DPKG_H_
5714 +#define __FSL_DPKG_H_
5715 +
5716 +#include <linux/types.h>
5717 +#include "net.h"
5718 +
5719 +/* Data Path Key Generator API
5720 + * Contains initialization APIs and runtime APIs for the Key Generator
5721 + */
5722 +
5723 +/** Key Generator properties */
5724 +
5725 +/**
5726 + * Number of masks per key extraction
5727 + */
5728 +#define DPKG_NUM_OF_MASKS 4
5729 +/**
5730 + * Number of extractions per key profile
5731 + */
5732 +#define DPKG_MAX_NUM_OF_EXTRACTS 10
5733 +
5734 +/**
5735 + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
5736 + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
5737 + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
5738 + * @DPKG_FULL_FIELD: Extract a full field
5739 + */
5740 +enum dpkg_extract_from_hdr_type {
5741 + DPKG_FROM_HDR = 0,
5742 + DPKG_FROM_FIELD = 1,
5743 + DPKG_FULL_FIELD = 2
5744 +};
5745 +
5746 +/**
5747 + * enum dpkg_extract_type - Enumeration for selecting extraction type
5748 + * @DPKG_EXTRACT_FROM_HDR: Extract from the header
5749 + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
5750 + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
5751 + * e.g. can be used to extract header existence;
5752 + * please refer to 'Parse Result definition' section in the parser BG
5753 + */
5754 +enum dpkg_extract_type {
5755 + DPKG_EXTRACT_FROM_HDR = 0,
5756 + DPKG_EXTRACT_FROM_DATA = 1,
5757 + DPKG_EXTRACT_FROM_PARSE = 3
5758 +};
5759 +
5760 +/**
5761 + * struct dpkg_mask - A structure for defining a single extraction mask
5762 + * @mask: Byte mask for the extracted content
5763 + * @offset: Offset within the extracted content
5764 + */
5765 +struct dpkg_mask {
5766 + u8 mask;
5767 + u8 offset;
5768 +};
5769 +
5770 +/**
5771 + * struct dpkg_extract - A structure for defining a single extraction
5772 + * @type: Determines how the union below is interpreted:
5773 + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
5774 + * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
5775 + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
5776 + * @extract: Selects extraction method
5777 + * @num_of_byte_masks: Defines the number of valid entries in the array below;
5778 + * This is also the number of bytes to be used as masks
5779 + * @masks: Masks parameters
5780 + */
5781 +struct dpkg_extract {
5782 + enum dpkg_extract_type type;
5783 + /**
5784 + * union extract - Selects extraction method
5785 + * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
5786 + * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
5787 + * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
5788 + */
5789 + union {
5790 + /**
5791 + * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
5792 + * @prot: Any of the supported headers
5793 + * @type: Defines the type of header extraction:
5794 + * DPKG_FROM_HDR: use size & offset below;
5795 + * DPKG_FROM_FIELD: use field, size and offset below;
5796 + * DPKG_FULL_FIELD: use field below
5797 + * @field: One of the supported fields (NH_FLD_)
5798 + *
5799 + * @size: Size in bytes
5800 + * @offset: Byte offset
5801 + * @hdr_index: Clear for cases not listed below;
5802 + * Used for protocols that may have more than a single
5803 + * header, 0 indicates an outer header;
5804 + * Supported protocols (possible values):
5805 + * NET_PROT_VLAN (0, HDR_INDEX_LAST);
5806 + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
5807 + * NET_PROT_IP(0, HDR_INDEX_LAST);
5808 + * NET_PROT_IPv4(0, HDR_INDEX_LAST);
5809 + * NET_PROT_IPv6(0, HDR_INDEX_LAST);
5810 + */
5811 +
5812 + struct {
5813 + enum net_prot prot;
5814 + enum dpkg_extract_from_hdr_type type;
5815 + u32 field;
5816 + u8 size;
5817 + u8 offset;
5818 + u8 hdr_index;
5819 + } from_hdr;
5820 + /**
5821 + * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
5822 + * @size: Size in bytes
5823 + * @offset: Byte offset
5824 + */
5825 + struct {
5826 + u8 size;
5827 + u8 offset;
5828 + } from_data;
5829 +
5830 + /**
5831 + * struct from_parse - Used when
5832 + * 'type = DPKG_EXTRACT_FROM_PARSE'
5833 + * @size: Size in bytes
5834 + * @offset: Byte offset
5835 + */
5836 + struct {
5837 + u8 size;
5838 + u8 offset;
5839 + } from_parse;
5840 + } extract;
5841 +
5842 + u8 num_of_byte_masks;
5843 + struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
5844 +};
5845 +
5846 +/**
5847 + * struct dpkg_profile_cfg - A structure for defining a full Key Generation
5848 + * profile (rule)
5849 + * @num_extracts: Defines the number of valid entries in the array below
5850 + * @extracts: Array of required extractions
5851 + */
5852 +struct dpkg_profile_cfg {
5853 + u8 num_extracts;
5854 + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
5855 +};
5856 +
5857 +#endif /* __FSL_DPKG_H_ */
5858 --- /dev/null
5859 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
5860 @@ -0,0 +1,600 @@
5861 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
5862 + * Copyright 2016 NXP
5863 + *
5864 + * Redistribution and use in source and binary forms, with or without
5865 + * modification, are permitted provided that the following conditions are met:
5866 + * * Redistributions of source code must retain the above copyright
5867 + * notice, this list of conditions and the following disclaimer.
5868 + * * Redistributions in binary form must reproduce the above copyright
5869 + * notice, this list of conditions and the following disclaimer in the
5870 + * documentation and/or other materials provided with the distribution.
5871 + * * Neither the name of the above-listed copyright holders nor the
5872 + * names of any contributors may be used to endorse or promote products
5873 + * derived from this software without specific prior written permission.
5874 + *
5875 + *
5876 + * ALTERNATIVELY, this software may be distributed under the terms of the
5877 + * GNU General Public License ("GPL") as published by the Free Software
5878 + * Foundation, either version 2 of that License or (at your option) any
5879 + * later version.
5880 + *
5881 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
5882 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5883 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5884 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
5885 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
5886 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
5887 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
5888 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
5889 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
5890 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
5891 + * POSSIBILITY OF SUCH DAMAGE.
5892 + */
5893 +#ifndef _FSL_DPNI_CMD_H
5894 +#define _FSL_DPNI_CMD_H
5895 +
5896 +/* DPNI Version */
5897 +#define DPNI_VER_MAJOR 7
5898 +#define DPNI_VER_MINOR 0
5899 +#define DPNI_CMD_BASE_VERSION 1
5900 +#define DPNI_CMD_ID_OFFSET 4
5901 +
5902 +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
5903 +
5904 +#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
5905 +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
5906 +#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
5907 +#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
5908 +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
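/* Editor's note (not part of the patch): worked example of the encoding
 * above: DPNI_CMDID_OPEN = DPNI_CMD(0x801) = (0x801 << 4) | 1 = 0x8011,
 * i.e. bits 15..4 carry the command ID and bits 3..0 the command base
 * version.
 */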
5909 +
5910 +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
5911 +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
5912 +#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
5913 +#define DPNI_CMDID_RESET DPNI_CMD(0x005)
5914 +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
5915 +
5916 +#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
5917 +#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
5918 +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
5919 +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
5920 +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
5921 +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
5922 +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
5923 +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
5924 +
5925 +#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
5926 +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
5927 +
5928 +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
5929 +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
5930 +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
5931 +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
5932 +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
5933 +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
5934 +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
5935 +
5936 +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
5937 +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
5938 +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
5939 +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
5940 +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
5941 +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
5942 +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
5943 +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
5944 +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
5945 +
5946 +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
5947 +
5948 +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
5949 +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
5950 +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
5951 +
5952 +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
5953 +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
5954 +#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
5955 +#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
5956 +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
5957 +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
5958 +
5959 +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
5960 +
5961 +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
5962 +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
5963 +
5964 +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
5965 +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
5966 +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
5967 +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
5968 +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
5969 +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
5970 +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
5971 +
5972 +/* Macros for accessing command fields smaller than 1 byte */
5973 +#define DPNI_MASK(field) \
5974 + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
5975 + DPNI_##field##_SHIFT)
5976 +
5977 +#define dpni_set_field(var, field, val) \
5978 + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
5979 +#define dpni_get_field(var, field) \
5980 + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
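/* Editor's note (not part of the patch): worked example for the helpers
 * above, taking DPNI_MISS_ACTION (defined later in this file with SHIFT = 4,
 * SIZE = 4) as the field: DPNI_MASK(MISS_ACTION) = GENMASK(7, 4) = 0xF0, so
 * dpni_set_field(flags, MISS_ACTION, 2) ORs 0x20 into 'flags' and
 * dpni_get_field(flags, MISS_ACTION) recovers the value 2.
 */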
5981 +
5982 +struct dpni_cmd_open {
5983 + __le32 dpni_id;
5984 +};
5985 +
5986 +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
5987 +struct dpni_cmd_set_pools {
5988 + /* cmd word 0 */
5989 + u8 num_dpbp;
5990 + u8 backup_pool_mask;
5991 + __le16 pad;
5992 + /* cmd word 0..4 */
5993 + __le32 dpbp_id[DPNI_MAX_DPBP];
5994 + /* cmd word 4..6 */
5995 + __le16 buffer_size[DPNI_MAX_DPBP];
5996 +};
5997 +
5998 +/* The enable indication is always the least significant bit */
5999 +#define DPNI_ENABLE_SHIFT 0
6000 +#define DPNI_ENABLE_SIZE 1
6001 +
6002 +struct dpni_rsp_is_enabled {
6003 + u8 enabled;
6004 +};
6005 +
6006 +struct dpni_rsp_get_irq {
6007 + /* response word 0 */
6008 + __le32 irq_val;
6009 + __le32 pad;
6010 + /* response word 1 */
6011 + __le64 irq_addr;
6012 + /* response word 2 */
6013 + __le32 irq_num;
6014 + __le32 type;
6015 +};
6016 +
6017 +struct dpni_cmd_set_irq_enable {
6018 + u8 enable;
6019 + u8 pad[3];
6020 + u8 irq_index;
6021 +};
6022 +
6023 +struct dpni_cmd_get_irq_enable {
6024 + __le32 pad;
6025 + u8 irq_index;
6026 +};
6027 +
6028 +struct dpni_rsp_get_irq_enable {
6029 + u8 enabled;
6030 +};
6031 +
6032 +struct dpni_cmd_set_irq_mask {
6033 + __le32 mask;
6034 + u8 irq_index;
6035 +};
6036 +
6037 +struct dpni_cmd_get_irq_mask {
6038 + __le32 pad;
6039 + u8 irq_index;
6040 +};
6041 +
6042 +struct dpni_rsp_get_irq_mask {
6043 + __le32 mask;
6044 +};
6045 +
6046 +struct dpni_cmd_get_irq_status {
6047 + __le32 status;
6048 + u8 irq_index;
6049 +};
6050 +
6051 +struct dpni_rsp_get_irq_status {
6052 + __le32 status;
6053 +};
6054 +
6055 +struct dpni_cmd_clear_irq_status {
6056 + __le32 status;
6057 + u8 irq_index;
6058 +};
6059 +
6060 +struct dpni_rsp_get_attr {
6061 + /* response word 0 */
6062 + __le32 options;
6063 + u8 num_queues;
6064 + u8 num_tcs;
6065 + u8 mac_filter_entries;
6066 + u8 pad0;
6067 + /* response word 1 */
6068 + u8 vlan_filter_entries;
6069 + u8 pad1;
6070 + u8 qos_entries;
6071 + u8 pad2;
6072 + __le16 fs_entries;
6073 + __le16 pad3;
6074 + /* response word 2 */
6075 + u8 qos_key_size;
6076 + u8 fs_key_size;
6077 + __le16 wriop_version;
6078 +};
6079 +
6080 +#define DPNI_ERROR_ACTION_SHIFT 0
6081 +#define DPNI_ERROR_ACTION_SIZE 4
6082 +#define DPNI_FRAME_ANN_SHIFT 4
6083 +#define DPNI_FRAME_ANN_SIZE 1
6084 +
6085 +struct dpni_cmd_set_errors_behavior {
6086 + __le32 errors;
6087 + /* from least significant bit: error_action:4, set_frame_annotation:1 */
6088 + u8 flags;
6089 +};
6090 +
6091 +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
6092 + * buffer layouts, but they all share the same parameters.
6093 + * If one of the functions changes, the structure below needs to be split.
6094 + */
6095 +
6096 +#define DPNI_PASS_TS_SHIFT 0
6097 +#define DPNI_PASS_TS_SIZE 1
6098 +#define DPNI_PASS_PR_SHIFT 1
6099 +#define DPNI_PASS_PR_SIZE 1
6100 +#define DPNI_PASS_FS_SHIFT 2
6101 +#define DPNI_PASS_FS_SIZE 1
6102 +
6103 +struct dpni_cmd_get_buffer_layout {
6104 + u8 qtype;
6105 +};
6106 +
6107 +struct dpni_rsp_get_buffer_layout {
6108 + /* response word 0 */
6109 + u8 pad0[6];
6110 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
6111 + u8 flags;
6112 + u8 pad1;
6113 + /* response word 1 */
6114 + __le16 private_data_size;
6115 + __le16 data_align;
6116 + __le16 head_room;
6117 + __le16 tail_room;
6118 +};
6119 +
6120 +struct dpni_cmd_set_buffer_layout {
6121 + /* cmd word 0 */
6122 + u8 qtype;
6123 + u8 pad0[3];
6124 + __le16 options;
6125 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
6126 + u8 flags;
6127 + u8 pad1;
6128 + /* cmd word 1 */
6129 + __le16 private_data_size;
6130 + __le16 data_align;
6131 + __le16 head_room;
6132 + __le16 tail_room;
6133 +};
6134 +
6135 +struct dpni_cmd_set_offload {
6136 + u8 pad[3];
6137 + u8 dpni_offload;
6138 + __le32 config;
6139 +};
6140 +
6141 +struct dpni_cmd_get_offload {
6142 + u8 pad[3];
6143 + u8 dpni_offload;
6144 +};
6145 +
6146 +struct dpni_rsp_get_offload {
6147 + __le32 pad;
6148 + __le32 config;
6149 +};
6150 +
6151 +struct dpni_cmd_get_qdid {
6152 + u8 qtype;
6153 +};
6154 +
6155 +struct dpni_rsp_get_qdid {
6156 + __le16 qdid;
6157 +};
6158 +
6159 +struct dpni_rsp_get_tx_data_offset {
6160 + __le16 data_offset;
6161 +};
6162 +
6163 +struct dpni_cmd_get_statistics {
6164 + u8 page_number;
6165 +};
6166 +
6167 +struct dpni_rsp_get_statistics {
6168 + __le64 counter[DPNI_STATISTICS_CNT];
6169 +};
6170 +
6171 +struct dpni_cmd_set_link_cfg {
6172 + /* cmd word 0 */
6173 + __le64 pad0;
6174 + /* cmd word 1 */
6175 + __le32 rate;
6176 + __le32 pad1;
6177 + /* cmd word 2 */
6178 + __le64 options;
6179 +};
6180 +
6181 +#define DPNI_LINK_STATE_SHIFT 0
6182 +#define DPNI_LINK_STATE_SIZE 1
6183 +
6184 +struct dpni_rsp_get_link_state {
6185 + /* response word 0 */
6186 + __le32 pad0;
6187 + /* from LSB: up:1 */
6188 + u8 flags;
6189 + u8 pad1[3];
6190 + /* response word 1 */
6191 + __le32 rate;
6192 + __le32 pad2;
6193 + /* response word 2 */
6194 + __le64 options;
6195 +};
6196 +
6197 +struct dpni_cmd_set_tx_shaping {
6198 + /* cmd word 0 */
6199 + __le16 max_burst_size;
6200 + __le16 pad0[3];
6201 + /* cmd word 1 */
6202 + __le32 rate_limit;
6203 +};
6204 +
6205 +struct dpni_cmd_set_max_frame_length {
6206 + __le16 max_frame_length;
6207 +};
6208 +
6209 +struct dpni_rsp_get_max_frame_length {
6210 + __le16 max_frame_length;
6211 +};
6212 +
6213 +struct dpni_cmd_set_multicast_promisc {
6214 + u8 enable;
6215 +};
6216 +
6217 +struct dpni_rsp_get_multicast_promisc {
6218 + u8 enabled;
6219 +};
6220 +
6221 +struct dpni_cmd_set_unicast_promisc {
6222 + u8 enable;
6223 +};
6224 +
6225 +struct dpni_rsp_get_unicast_promisc {
6226 + u8 enabled;
6227 +};
6228 +
6229 +struct dpni_cmd_set_primary_mac_addr {
6230 + __le16 pad;
6231 + u8 mac_addr[6];
6232 +};
6233 +
6234 +struct dpni_rsp_get_primary_mac_addr {
6235 + __le16 pad;
6236 + u8 mac_addr[6];
6237 +};
6238 +
6239 +struct dpni_rsp_get_port_mac_addr {
6240 + __le16 pad;
6241 + u8 mac_addr[6];
6242 +};
6243 +
6244 +struct dpni_cmd_add_mac_addr {
6245 + __le16 pad;
6246 + u8 mac_addr[6];
6247 +};
6248 +
6249 +struct dpni_cmd_remove_mac_addr {
6250 + __le16 pad;
6251 + u8 mac_addr[6];
6252 +};
6253 +
6254 +#define DPNI_UNICAST_FILTERS_SHIFT 0
6255 +#define DPNI_UNICAST_FILTERS_SIZE 1
6256 +#define DPNI_MULTICAST_FILTERS_SHIFT 1
6257 +#define DPNI_MULTICAST_FILTERS_SIZE 1
6258 +
6259 +struct dpni_cmd_clear_mac_filters {
6260 + /* from LSB: unicast:1, multicast:1 */
6261 + u8 flags;
6262 +};
6263 +
6264 +#define DPNI_DIST_MODE_SHIFT 0
6265 +#define DPNI_DIST_MODE_SIZE 4
6266 +#define DPNI_MISS_ACTION_SHIFT 4
6267 +#define DPNI_MISS_ACTION_SIZE 4
6268 +
6269 +struct dpni_cmd_set_rx_tc_dist {
6270 + /* cmd word 0 */
6271 + __le16 dist_size;
6272 + u8 tc_id;
6273 + /* from LSB: dist_mode:4, miss_action:4 */
6274 + u8 flags;
6275 + __le16 pad0;
6276 + __le16 default_flow_id;
6277 + /* cmd word 1..5 */
6278 + __le64 pad1[5];
6279 + /* cmd word 6 */
6280 + __le64 key_cfg_iova;
6281 +};
6282 +
6283 +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
6284 + * key_cfg_iova)
6285 + */
6286 +struct dpni_mask_cfg {
6287 + u8 mask;
6288 + u8 offset;
6289 +};
6290 +
6291 +#define DPNI_EFH_TYPE_SHIFT 0
6292 +#define DPNI_EFH_TYPE_SIZE 4
6293 +#define DPNI_EXTRACT_TYPE_SHIFT 0
6294 +#define DPNI_EXTRACT_TYPE_SIZE 4
6295 +
6296 +struct dpni_dist_extract {
6297 + /* word 0 */
6298 + u8 prot;
6299 + /* EFH type stored in the 4 least significant bits */
6300 + u8 efh_type;
6301 + u8 size;
6302 + u8 offset;
6303 + __le32 field;
6304 + /* word 1 */
6305 + u8 hdr_index;
6306 + u8 constant;
6307 + u8 num_of_repeats;
6308 + u8 num_of_byte_masks;
6309 + /* Extraction type is stored in the 4 LSBs */
6310 + u8 extract_type;
6311 + u8 pad[3];
6312 + /* word 2 */
6313 + struct dpni_mask_cfg masks[4];
6314 +};
6315 +
6316 +struct dpni_ext_set_rx_tc_dist {
6317 + /* extension word 0 */
6318 + u8 num_extracts;
6319 + u8 pad[7];
6320 + /* words 1..25 */
6321 + struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
6322 +};
6323 +
6324 +struct dpni_cmd_get_queue {
6325 + u8 qtype;
6326 + u8 tc;
6327 + u8 index;
6328 +};
6329 +
6330 +#define DPNI_DEST_TYPE_SHIFT 0
6331 +#define DPNI_DEST_TYPE_SIZE 4
6332 +#define DPNI_STASH_CTRL_SHIFT 6
6333 +#define DPNI_STASH_CTRL_SIZE 1
6334 +#define DPNI_HOLD_ACTIVE_SHIFT 7
6335 +#define DPNI_HOLD_ACTIVE_SIZE 1
6336 +
6337 +struct dpni_rsp_get_queue {
6338 + /* response word 0 */
6339 + __le64 pad0;
6340 + /* response word 1 */
6341 + __le32 dest_id;
6342 + __le16 pad1;
6343 + u8 dest_prio;
6344 + /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
6345 + u8 flags;
6346 + /* response word 2 */
6347 + __le64 flc;
6348 + /* response word 3 */
6349 + __le64 user_context;
6350 + /* response word 4 */
6351 + __le32 fqid;
6352 + __le16 qdbin;
6353 +};
6354 +
6355 +struct dpni_cmd_set_queue {
6356 + /* cmd word 0 */
6357 + u8 qtype;
6358 + u8 tc;
6359 + u8 index;
6360 + u8 options;
6361 + __le32 pad0;
6362 + /* cmd word 1 */
6363 + __le32 dest_id;
6364 + __le16 pad1;
6365 + u8 dest_prio;
6366 + u8 flags;
6367 + /* cmd word 2 */
6368 + __le64 flc;
6369 + /* cmd word 3 */
6370 + __le64 user_context;
6371 +};
6372 +
6373 +struct dpni_cmd_add_fs_entry {
6374 + /* cmd word 0 */
6375 + u16 options;
6376 + u8 tc_id;
6377 + u8 key_size;
6378 + u16 index;
6379 + u16 flow_id;
6380 + /* cmd word 1 */
6381 + u64 key_iova;
6382 + /* cmd word 2 */
6383 + u64 mask_iova;
6384 + /* cmd word 3 */
6385 + u64 flc;
6386 +};
6387 +
6388 +struct dpni_cmd_remove_fs_entry {
6389 + /* cmd word 0 */
6390 + __le16 pad0;
6391 + u8 tc_id;
6392 + u8 key_size;
6393 + __le32 pad1;
6394 + /* cmd word 1 */
6395 + u64 key_iova;
6396 + /* cmd word 2 */
6397 + u64 mask_iova;
6398 +};
6399 +
6400 +struct dpni_cmd_set_taildrop {
6401 + /* cmd word 0 */
6402 + u8 congestion_point;
6403 + u8 qtype;
6404 + u8 tc;
6405 + u8 index;
6406 + __le32 pad0;
6407 + /* cmd word 1 */
6408 + /* Only least significant bit is relevant */
6409 + u8 enable;
6410 + u8 pad1;
6411 + u8 units;
6412 + u8 pad2;
6413 + __le32 threshold;
6414 +};
6415 +
6416 +struct dpni_cmd_get_taildrop {
6417 + u8 congestion_point;
6418 + u8 qtype;
6419 + u8 tc;
6420 + u8 index;
6421 +};
6422 +
6423 +struct dpni_rsp_get_taildrop {
6424 + /* cmd word 0 */
6425 + __le64 pad0;
6426 + /* cmd word 1 */
6427 + /* only least significant bit is relevant */
6428 + u8 enable;
6429 + u8 pad1;
6430 + u8 units;
6431 + u8 pad2;
6432 + __le32 threshold;
6433 +};
6434 +
6435 +#define DPNI_DEST_TYPE_SHIFT 0
6436 +#define DPNI_DEST_TYPE_SIZE 4
6437 +#define DPNI_CONG_UNITS_SHIFT 4
6438 +#define DPNI_CONG_UNITS_SIZE 2
6439 +
6440 +struct dpni_cmd_set_congestion_notification {
6441 + /* cmd word 0 */
6442 + u8 qtype;
6443 + u8 tc;
6444 + u8 pad[6];
6445 + /* cmd word 1 */
6446 + u32 dest_id;
6447 + u16 notification_mode;
6448 + u8 dest_priority;
6449 + /* from LSB: dest_type: 4 units:2 */
6450 + u8 type_units;
6451 + /* cmd word 2 */
6452 + u64 message_iova;
6453 + /* cmd word 3 */
6454 + u64 message_ctx;
6455 + /* cmd word 4 */
6456 + u32 threshold_entry;
6457 + u32 threshold_exit;
6458 +};
6459 +
6460 +#endif /* _FSL_DPNI_CMD_H */
6461 --- /dev/null
6462 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
6463 @@ -0,0 +1,1770 @@
6464 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
6465 + * Copyright 2016 NXP
6466 + *
6467 + * Redistribution and use in source and binary forms, with or without
6468 + * modification, are permitted provided that the following conditions are met:
6469 + * * Redistributions of source code must retain the above copyright
6470 + * notice, this list of conditions and the following disclaimer.
6471 + * * Redistributions in binary form must reproduce the above copyright
6472 + * notice, this list of conditions and the following disclaimer in the
6473 + * documentation and/or other materials provided with the distribution.
6474 + * * Neither the name of the above-listed copyright holders nor the
6475 + * names of any contributors may be used to endorse or promote products
6476 + * derived from this software without specific prior written permission.
6477 + *
6478 + *
6479 + * ALTERNATIVELY, this software may be distributed under the terms of the
6480 + * GNU General Public License ("GPL") as published by the Free Software
6481 + * Foundation, either version 2 of that License or (at your option) any
6482 + * later version.
6483 + *
6484 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
6485 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
6486 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
6487 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
6488 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
6489 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
6490 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
6491 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
6492 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
6493 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6494 + * POSSIBILITY OF SUCH DAMAGE.
6495 + */
6496 +#include "../../fsl-mc/include/mc-sys.h"
6497 +#include "../../fsl-mc/include/mc-cmd.h"
6498 +#include "dpni.h"
6499 +#include "dpni-cmd.h"
6500 +
6501 +/**
6502 + * dpni_prepare_key_cfg() - prepare the extract parameters
6503 + * @cfg: Full Key Generation profile (rule) definition
6504 + * @key_cfg_buf: Zeroed 256-byte buffer to fill in, before mapping it for DMA
6505 + *
6506 + * This function has to be called before the following functions:
6507 + * - dpni_set_rx_tc_dist()
6508 + * - dpni_set_qos_table()
6509 + */
6510 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
6511 +{
6512 + int i, j;
6513 + struct dpni_ext_set_rx_tc_dist *dpni_ext;
6514 + struct dpni_dist_extract *extr;
6515 +
6516 + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
6517 + return -EINVAL;
6518 +
6519 + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
6520 + dpni_ext->num_extracts = cfg->num_extracts;
6521 +
6522 + for (i = 0; i < cfg->num_extracts; i++) {
6523 + extr = &dpni_ext->extracts[i];
6524 +
6525 + switch (cfg->extracts[i].type) {
6526 + case DPKG_EXTRACT_FROM_HDR:
6527 + extr->prot = cfg->extracts[i].extract.from_hdr.prot;
6528 + dpni_set_field(extr->efh_type, EFH_TYPE,
6529 + cfg->extracts[i].extract.from_hdr.type);
6530 + extr->size = cfg->extracts[i].extract.from_hdr.size;
6531 + extr->offset = cfg->extracts[i].extract.from_hdr.offset;
6532 + extr->field = cpu_to_le32(
6533 + cfg->extracts[i].extract.from_hdr.field);
6534 + extr->hdr_index =
6535 + cfg->extracts[i].extract.from_hdr.hdr_index;
6536 + break;
6537 + case DPKG_EXTRACT_FROM_DATA:
6538 + extr->size = cfg->extracts[i].extract.from_data.size;
6539 + extr->offset =
6540 + cfg->extracts[i].extract.from_data.offset;
6541 + break;
6542 + case DPKG_EXTRACT_FROM_PARSE:
6543 + extr->size = cfg->extracts[i].extract.from_parse.size;
6544 + extr->offset =
6545 + cfg->extracts[i].extract.from_parse.offset;
6546 + break;
6547 + default:
6548 + return -EINVAL;
6549 + }
6550 +
6551 + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
6552 + dpni_set_field(extr->extract_type, EXTRACT_TYPE,
6553 + cfg->extracts[i].type);
6554 +
6555 + for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
6556 + extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
6557 + extr->masks[j].offset =
6558 + cfg->extracts[i].masks[j].offset;
6559 + }
6560 + }
6561 +
6562 + return 0;
6563 +}
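/*
 * Editor's sketch (not part of the patch): a minimal caller for
 * dpni_prepare_key_cfg(), assuming distribution on the full IPv4 source
 * address. The 256-byte zeroed DMA-able buffer follows the kerneldoc above;
 * 'example_prepare_dist_key' and its ownership of 'buf' are illustrative
 * only.
 */
static int example_prepare_dist_key(struct device *dev, dma_addr_t *key_iova)
{
	struct dpkg_profile_cfg cfg = { 0 };
	u8 *buf;
	int err;

	cfg.num_extracts = 1;
	cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
	cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;

	buf = kzalloc(256, GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cfg, buf);
	if (err) {
		kfree(buf);
		return err;
	}

	/* The serialized profile is passed to the MC by IOVA, e.g. as
	 * key_cfg_iova in dpni_set_rx_tc_dist().
	 */
	*key_iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *key_iova)) {
		kfree(buf);
		return -ENOMEM;
	}

	return 0;
}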
6564 +
6565 +/**
6566 + * dpni_open() - Open a control session for the specified object
6567 + * @mc_io: Pointer to MC portal's I/O object
6568 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6569 + * @dpni_id: DPNI unique ID
6570 + * @token: Returned token; use in subsequent API calls
6571 + *
6572 + * This function can be used to open a control session for an
6573 + * already created object; an object may have been declared in
6574 + * the DPL or by calling the dpni_create() function.
6575 + * This function returns a unique authentication token,
6576 + * associated with the specific object ID and the specific MC
6577 + * portal; this token must be used in all subsequent commands for
6578 + * this specific object.
6579 + *
6580 + * Return: '0' on Success; Error code otherwise.
6581 + */
6582 +int dpni_open(struct fsl_mc_io *mc_io,
6583 + u32 cmd_flags,
6584 + int dpni_id,
6585 + u16 *token)
6586 +{
6587 + struct mc_command cmd = { 0 };
6588 + struct dpni_cmd_open *cmd_params;
6589 +
6590 + int err;
6591 +
6592 + /* prepare command */
6593 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
6594 + cmd_flags,
6595 + 0);
6596 + cmd_params = (struct dpni_cmd_open *)cmd.params;
6597 + cmd_params->dpni_id = cpu_to_le32(dpni_id);
6598 +
6599 + /* send command to mc*/
6600 + err = mc_send_command(mc_io, &cmd);
6601 + if (err)
6602 + return err;
6603 +
6604 + /* retrieve response parameters */
6605 + *token = mc_cmd_hdr_read_token(&cmd);
6606 +
6607 + return 0;
6608 +}
6609 +
6610 +/**
6611 + * dpni_close() - Close the control session of the object
6612 + * @mc_io: Pointer to MC portal's I/O object
6613 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6614 + * @token: Token of DPNI object
6615 + *
6616 + * After this function is called, no further operations are
6617 + * allowed on the object without opening a new control session.
6618 + *
6619 + * Return: '0' on Success; Error code otherwise.
6620 + */
6621 +int dpni_close(struct fsl_mc_io *mc_io,
6622 + u32 cmd_flags,
6623 + u16 token)
6624 +{
6625 + struct mc_command cmd = { 0 };
6626 +
6627 + /* prepare command */
6628 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
6629 + cmd_flags,
6630 + token);
6631 +
6632 + /* send command to mc*/
6633 + return mc_send_command(mc_io, &cmd);
6634 +}
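/*
 * Editor's sketch (not part of the patch): how the token returned by
 * dpni_open() threads through the rest of the commands in this file.
 * 'mc_io' and 'dpni_id' are assumed to come from the fsl-mc bus probe path;
 * dpni_get_link_state() is part of the same API, declared in dpni.h.
 */
static int example_query_link(struct fsl_mc_io *mc_io, int dpni_id)
{
	struct dpni_link_state state = { 0 };
	u16 token;
	int err;

	err = dpni_open(mc_io, 0, dpni_id, &token);
	if (err)
		return err;

	err = dpni_get_link_state(mc_io, 0, token, &state);

	dpni_close(mc_io, 0, token);
	return err;
}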
6635 +
6636 +/**
6637 + * dpni_set_pools() - Set buffer pools configuration
6638 + * @mc_io: Pointer to MC portal's I/O object
6639 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6640 + * @token: Token of DPNI object
6641 + * @cfg: Buffer pools configuration
6642 + *
6643 + * This configuration is mandatory for DPNI operation.
6644 + * Warning: allowed only when the DPNI is disabled
6645 + *
6646 + * Return: '0' on Success; Error code otherwise.
6647 + */
6648 +int dpni_set_pools(struct fsl_mc_io *mc_io,
6649 + u32 cmd_flags,
6650 + u16 token,
6651 + const struct dpni_pools_cfg *cfg)
6652 +{
6653 + struct mc_command cmd = { 0 };
6654 + struct dpni_cmd_set_pools *cmd_params;
6655 + int i;
6656 +
6657 + /* prepare command */
6658 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
6659 + cmd_flags,
6660 + token);
6661 + cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
6662 + cmd_params->num_dpbp = cfg->num_dpbp;
6663 + for (i = 0; i < DPNI_MAX_DPBP; i++) {
6664 + cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
6665 + cmd_params->buffer_size[i] =
6666 + cpu_to_le16(cfg->pools[i].buffer_size);
6667 + cmd_params->backup_pool_mask |=
6668 + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
6669 + }
6670 +
6671 + /* send command to mc*/
6672 + return mc_send_command(mc_io, &cmd);
6673 +}
6674 +
6675 +/**
6676 + * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
6677 + * @mc_io: Pointer to MC portal's I/O object
6678 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6679 + * @token: Token of DPNI object
6680 + *
6681 + * Return: '0' on Success; Error code otherwise.
6682 + */
6683 +int dpni_enable(struct fsl_mc_io *mc_io,
6684 + u32 cmd_flags,
6685 + u16 token)
6686 +{
6687 + struct mc_command cmd = { 0 };
6688 +
6689 + /* prepare command */
6690 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
6691 + cmd_flags,
6692 + token);
6693 +
6694 + /* send command to mc*/
6695 + return mc_send_command(mc_io, &cmd);
6696 +}
6697 +
6698 +/**
6699 + * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
6700 + * @mc_io: Pointer to MC portal's I/O object
6701 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6702 + * @token: Token of DPNI object
6703 + *
6704 + * Return: '0' on Success; Error code otherwise.
6705 + */
6706 +int dpni_disable(struct fsl_mc_io *mc_io,
6707 + u32 cmd_flags,
6708 + u16 token)
6709 +{
6710 + struct mc_command cmd = { 0 };
6711 +
6712 + /* prepare command */
6713 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
6714 + cmd_flags,
6715 + token);
6716 +
6717 + /* send command to mc*/
6718 + return mc_send_command(mc_io, &cmd);
6719 +}
6720 +
6721 +/**
6722 + * dpni_is_enabled() - Check if the DPNI is enabled.
6723 + * @mc_io: Pointer to MC portal's I/O object
6724 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6725 + * @token: Token of DPNI object
6726 + * @en: Returns '1' if object is enabled; '0' otherwise
6727 + *
6728 + * Return: '0' on Success; Error code otherwise.
6729 + */
6730 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
6731 + u32 cmd_flags,
6732 + u16 token,
6733 + int *en)
6734 +{
6735 + struct mc_command cmd = { 0 };
6736 + struct dpni_rsp_is_enabled *rsp_params;
6737 + int err;
6738 +
6739 + /* prepare command */
6740 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
6741 + cmd_flags,
6742 + token);
6743 +
6744 + /* send command to mc*/
6745 + err = mc_send_command(mc_io, &cmd);
6746 + if (err)
6747 + return err;
6748 +
6749 + /* retrieve response parameters */
6750 + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
6751 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
6752 +
6753 + return 0;
6754 +}
6755 +
6756 +/**
6757 + * dpni_reset() - Reset the DPNI, returns the object to initial state.
6758 + * @mc_io: Pointer to MC portal's I/O object
6759 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6760 + * @token: Token of DPNI object
6761 + *
6762 + * Return: '0' on Success; Error code otherwise.
6763 + */
6764 +int dpni_reset(struct fsl_mc_io *mc_io,
6765 + u32 cmd_flags,
6766 + u16 token)
6767 +{
6768 + struct mc_command cmd = { 0 };
6769 +
6770 + /* prepare command */
6771 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
6772 + cmd_flags,
6773 + token);
6774 +
6775 + /* send command to mc*/
6776 + return mc_send_command(mc_io, &cmd);
6777 +}
6778 +
6779 +/**
6780 + * dpni_set_irq_enable() - Set overall interrupt state.
6781 + * @mc_io: Pointer to MC portal's I/O object
6782 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6783 + * @token: Token of DPNI object
6784 + * @irq_index: The interrupt index to configure
6785 + * @en: Interrupt state: - enable = 1, disable = 0
6786 + *
6787 + * Allows GPP software to control when interrupts are generated.
6788 + * Each interrupt can have up to 32 causes. The enable/disable controls the
6789 + * overall interrupt state: if the interrupt is disabled, no cause will
6790 + * assert an interrupt.
6791 + *
6792 + * Return: '0' on Success; Error code otherwise.
6793 + */
6794 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
6795 + u32 cmd_flags,
6796 + u16 token,
6797 + u8 irq_index,
6798 + u8 en)
6799 +{
6800 + struct mc_command cmd = { 0 };
6801 + struct dpni_cmd_set_irq_enable *cmd_params;
6802 +
6803 + /* prepare command */
6804 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
6805 + cmd_flags,
6806 + token);
6807 + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
6808 + dpni_set_field(cmd_params->enable, ENABLE, en);
6809 + cmd_params->irq_index = irq_index;
6810 +
6811 + /* send command to mc*/
6812 + return mc_send_command(mc_io, &cmd);
6813 +}
6814 +
6815 +/**
6816 + * dpni_get_irq_enable() - Get overall interrupt state
6817 + * @mc_io: Pointer to MC portal's I/O object
6818 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6819 + * @token: Token of DPNI object
6820 + * @irq_index: The interrupt index to configure
6821 + * @en: Returned interrupt state - enable = 1, disable = 0
6822 + *
6823 + * Return: '0' on Success; Error code otherwise.
6824 + */
6825 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
6826 + u32 cmd_flags,
6827 + u16 token,
6828 + u8 irq_index,
6829 + u8 *en)
6830 +{
6831 + struct mc_command cmd = { 0 };
6832 + struct dpni_cmd_get_irq_enable *cmd_params;
6833 + struct dpni_rsp_get_irq_enable *rsp_params;
6834 +
6835 + int err;
6836 +
6837 + /* prepare command */
6838 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
6839 + cmd_flags,
6840 + token);
6841 + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
6842 + cmd_params->irq_index = irq_index;
6843 +
6844 + /* send command to mc*/
6845 + err = mc_send_command(mc_io, &cmd);
6846 + if (err)
6847 + return err;
6848 +
6849 + /* retrieve response parameters */
6850 + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
6851 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
6852 +
6853 + return 0;
6854 +}
6855 +
6856 +/**
6857 + * dpni_set_irq_mask() - Set interrupt mask.
6858 + * @mc_io: Pointer to MC portal's I/O object
6859 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6860 + * @token: Token of DPNI object
6861 + * @irq_index: The interrupt index to configure
6862 + * @mask: event mask to trigger interrupt;
6863 + * each bit:
6864 + * 0 = ignore event
6865 + * 1 = consider event for asserting IRQ
6866 + *
6867 + * Every interrupt can have up to 32 causes and the interrupt model supports
6868 + * masking/unmasking each cause independently
6869 + *
6870 + * Return: '0' on Success; Error code otherwise.
6871 + */
6872 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
6873 + u32 cmd_flags,
6874 + u16 token,
6875 + u8 irq_index,
6876 + u32 mask)
6877 +{
6878 + struct mc_command cmd = { 0 };
6879 + struct dpni_cmd_set_irq_mask *cmd_params;
6880 +
6881 + /* prepare command */
6882 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
6883 + cmd_flags,
6884 + token);
6885 + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
6886 + cmd_params->mask = cpu_to_le32(mask);
6887 + cmd_params->irq_index = irq_index;
6888 +
6889 + /* send command to mc*/
6890 + return mc_send_command(mc_io, &cmd);
6891 +}
6892 +
6893 +/**
6894 + * dpni_get_irq_mask() - Get interrupt mask.
6895 + * @mc_io: Pointer to MC portal's I/O object
6896 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6897 + * @token: Token of DPNI object
6898 + * @irq_index: The interrupt index to configure
6899 + * @mask: Returned event mask to trigger interrupt
6900 + *
6901 + * Every interrupt can have up to 32 causes and the interrupt model supports
6902 + * masking/unmasking each cause independently
6903 + *
6904 + * Return: '0' on Success; Error code otherwise.
6905 + */
6906 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
6907 + u32 cmd_flags,
6908 + u16 token,
6909 + u8 irq_index,
6910 + u32 *mask)
6911 +{
6912 + struct mc_command cmd = { 0 };
6913 + struct dpni_cmd_get_irq_mask *cmd_params;
6914 + struct dpni_rsp_get_irq_mask *rsp_params;
6915 + int err;
6916 +
6917 + /* prepare command */
6918 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
6919 + cmd_flags,
6920 + token);
6921 + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
6922 + cmd_params->irq_index = irq_index;
6923 +
6924 + /* send command to mc*/
6925 + err = mc_send_command(mc_io, &cmd);
6926 + if (err)
6927 + return err;
6928 +
6929 + /* retrieve response parameters */
6930 + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
6931 + *mask = le32_to_cpu(rsp_params->mask);
6932 +
6933 + return 0;
6934 +}
6935 +
6936 +/**
6937 + * dpni_get_irq_status() - Get the current status of any pending interrupts.
6938 + * @mc_io: Pointer to MC portal's I/O object
6939 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6940 + * @token: Token of DPNI object
6941 + * @irq_index: The interrupt index to configure
6942 + * @status: Returned interrupts status - one bit per cause:
6943 + * 0 = no interrupt pending
6944 + * 1 = interrupt pending
6945 + *
6946 + * Return: '0' on Success; Error code otherwise.
6947 + */
6948 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
6949 + u32 cmd_flags,
6950 + u16 token,
6951 + u8 irq_index,
6952 + u32 *status)
6953 +{
6954 + struct mc_command cmd = { 0 };
6955 + struct dpni_cmd_get_irq_status *cmd_params;
6956 + struct dpni_rsp_get_irq_status *rsp_params;
6957 + int err;
6958 +
6959 + /* prepare command */
6960 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
6961 + cmd_flags,
6962 + token);
6963 + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
6964 + cmd_params->status = cpu_to_le32(*status);
6965 + cmd_params->irq_index = irq_index;
6966 +
6967 + /* send command to mc*/
6968 + err = mc_send_command(mc_io, &cmd);
6969 + if (err)
6970 + return err;
6971 +
6972 + /* retrieve response parameters */
6973 + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
6974 + *status = le32_to_cpu(rsp_params->status);
6975 +
6976 + return 0;
6977 +}
6978 +
6979 +/**
6980 + * dpni_clear_irq_status() - Clear a pending interrupt's status
6981 + * @mc_io: Pointer to MC portal's I/O object
6982 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6983 + * @token: Token of DPNI object
6984 + * @irq_index: The interrupt index to configure
6985 + * @status: bits to clear (W1C) - one bit per cause:
6986 + * 0 = don't change
6987 + * 1 = clear status bit
6988 + *
6989 + * Return: '0' on Success; Error code otherwise.
6990 + */
6991 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
6992 + u32 cmd_flags,
6993 + u16 token,
6994 + u8 irq_index,
6995 + u32 status)
6996 +{
6997 + struct mc_command cmd = { 0 };
6998 + struct dpni_cmd_clear_irq_status *cmd_params;
6999 +
7000 + /* prepare command */
7001 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
7002 + cmd_flags,
7003 + token);
7004 + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
7005 + cmd_params->irq_index = irq_index;
7006 + cmd_params->status = cpu_to_le32(status);
7007 +
7008 + /* send command to mc*/
7009 + return mc_send_command(mc_io, &cmd);
7010 +}
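+
+/*
+ * Minimal usage sketch for the IRQ helpers above, assuming a valid MC portal
+ * 'mc_io' and an open DPNI 'token'; the helper name is illustrative only.
+ * The usual sequence is: read the pending causes, act on them, then
+ * acknowledge the handled bits with a write-1-to-clear.
+ */
+static void example_handle_dpni_irq(struct fsl_mc_io *mc_io, u16 token)
+{
+	u32 status = 0;
+
+	if (dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status))
+		return;
+
+	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
+		/* link changed: re-read it with dpni_get_link_state() */
+	}
+
+	/* acknowledge only the causes that were actually handled */
+	dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, status);
+}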
7011 +
7012 +/**
7013 + * dpni_get_attributes() - Retrieve DPNI attributes.
7014 + * @mc_io: Pointer to MC portal's I/O object
7015 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7016 + * @token: Token of DPNI object
7017 + * @attr: Object's attributes
7018 + *
7019 + * Return: '0' on Success; Error code otherwise.
7020 + */
7021 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
7022 + u32 cmd_flags,
7023 + u16 token,
7024 + struct dpni_attr *attr)
7025 +{
7026 + struct mc_command cmd = { 0 };
7027 + struct dpni_rsp_get_attr *rsp_params;
7028 +
7029 + int err;
7030 +
7031 + /* prepare command */
7032 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
7033 + cmd_flags,
7034 + token);
7035 +
7036 + /* send command to mc*/
7037 + err = mc_send_command(mc_io, &cmd);
7038 + if (err)
7039 + return err;
7040 +
7041 + /* retrieve response parameters */
7042 + rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
7043 + attr->options = le32_to_cpu(rsp_params->options);
7044 + attr->num_queues = rsp_params->num_queues;
7045 + attr->num_tcs = rsp_params->num_tcs;
7046 + attr->mac_filter_entries = rsp_params->mac_filter_entries;
7047 + attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
7048 + attr->qos_entries = rsp_params->qos_entries;
7049 + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
7050 + attr->qos_key_size = rsp_params->qos_key_size;
7051 + attr->fs_key_size = rsp_params->fs_key_size;
7052 + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
7053 +
7054 + return 0;
7055 +}
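+
+/*
+ * Minimal usage sketch, assuming a valid MC portal 'mc_io' (e.g. obtained
+ * from the fsl-mc bus) and the DPNI object id 'dpni_id'; function and
+ * variable names are illustrative only.
+ */
+static int example_query_dpni(struct fsl_mc_io *mc_io, int dpni_id)
+{
+	struct dpni_attr attr;
+	u16 token;
+	int err;
+
+	err = dpni_open(mc_io, 0, dpni_id, &token);
+	if (err)
+		return err;
+
+	memset(&attr, 0, sizeof(attr));
+	err = dpni_get_attributes(mc_io, 0, token, &attr);
+	if (!err)
+		pr_info("dpni.%d: %u queues, %u TCs\n",
+			dpni_id, attr.num_queues, attr.num_tcs);
+
+	dpni_close(mc_io, 0, token);
+	return err;
+}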
7056 +
7057 +/**
7058 + * dpni_set_errors_behavior() - Set errors behavior
7059 + * @mc_io: Pointer to MC portal's I/O object
7060 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7061 + * @token: Token of DPNI object
7062 + * @cfg: Errors configuration
7063 + *
7064 + * This function may be called numerous times with different
7065 + * error masks.
7066 + *
7067 + * Return: '0' on Success; Error code otherwise.
7068 + */
7069 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
7070 + u32 cmd_flags,
7071 + u16 token,
7072 + struct dpni_error_cfg *cfg)
7073 +{
7074 + struct mc_command cmd = { 0 };
7075 + struct dpni_cmd_set_errors_behavior *cmd_params;
7076 +
7077 + /* prepare command */
7078 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
7079 + cmd_flags,
7080 + token);
7081 + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
7082 + cmd_params->errors = cpu_to_le32(cfg->errors);
7083 + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
7084 + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
7085 +
7086 + /* send command to mc*/
7087 + return mc_send_command(mc_io, &cmd);
7088 +}
7089 +
7090 +/**
7091 + * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
7092 + * @mc_io: Pointer to MC portal's I/O object
7093 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7094 + * @token: Token of DPNI object
7095 + * @qtype: Type of queue to retrieve configuration for
7096 + * @layout: Returns buffer layout attributes
7097 + *
7098 + * Return: '0' on Success; Error code otherwise.
7099 + */
7100 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
7101 + u32 cmd_flags,
7102 + u16 token,
7103 + enum dpni_queue_type qtype,
7104 + struct dpni_buffer_layout *layout)
7105 +{
7106 + struct mc_command cmd = { 0 };
7107 + struct dpni_cmd_get_buffer_layout *cmd_params;
7108 + struct dpni_rsp_get_buffer_layout *rsp_params;
7109 + int err;
7110 +
7111 + /* prepare command */
7112 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
7113 + cmd_flags,
7114 + token);
7115 + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
7116 + cmd_params->qtype = qtype;
7117 +
7118 + /* send command to mc*/
7119 + err = mc_send_command(mc_io, &cmd);
7120 + if (err)
7121 + return err;
7122 +
7123 + /* retrieve response parameters */
7124 + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
7125 + layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
7126 + layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
7127 + layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
7128 + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
7129 + layout->data_align = le16_to_cpu(rsp_params->data_align);
7130 + layout->data_head_room = le16_to_cpu(rsp_params->head_room);
7131 + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
7132 +
7133 + return 0;
7134 +}
7135 +
7136 +/**
7137 + * dpni_set_buffer_layout() - Set buffer layout configuration.
7138 + * @mc_io: Pointer to MC portal's I/O object
7139 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7140 + * @token: Token of DPNI object
7141 + * @qtype: Type of queue this configuration applies to
7142 + * @layout: Buffer layout configuration
7143 + *
7144 + * Return: '0' on Success; Error code otherwise.
7145 + *
7146 + * @warning Allowed only when DPNI is disabled
7147 + */
7148 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
7149 + u32 cmd_flags,
7150 + u16 token,
7151 + enum dpni_queue_type qtype,
7152 + const struct dpni_buffer_layout *layout)
7153 +{
7154 + struct mc_command cmd = { 0 };
7155 + struct dpni_cmd_set_buffer_layout *cmd_params;
7156 +
7157 + /* prepare command */
7158 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
7159 + cmd_flags,
7160 + token);
7161 + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
7162 + cmd_params->qtype = qtype;
7163 + cmd_params->options = cpu_to_le16(layout->options);
7164 + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
7165 + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
7166 + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
7167 + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
7168 + cmd_params->data_align = cpu_to_le16(layout->data_align);
7169 + cmd_params->head_room = cpu_to_le16(layout->data_head_room);
7170 + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
7171 +
7172 + /* send command to mc*/
7173 + return mc_send_command(mc_io, &cmd);
7174 +}
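+
+/*
+ * Minimal usage sketch: set the Rx buffer layout while the DPNI is still
+ * disabled. Assumes 'mc_io' and an open 'token'; the alignment and headroom
+ * values below are illustrative, not requirements.
+ */
+static int example_set_rx_layout(struct fsl_mc_io *mc_io, u16 token)
+{
+	struct dpni_buffer_layout layout = {
+		.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+			   DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+			   DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM,
+		.pass_frame_status = 1,
+		.data_align = 64,
+		.data_head_room = 256,
+	};
+
+	return dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
+}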
7175 +
7176 +/**
7177 + * dpni_set_offload() - Set DPNI offload configuration.
7178 + * @mc_io: Pointer to MC portal's I/O object
7179 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7180 + * @token: Token of DPNI object
7181 + * @type: Type of DPNI offload
7182 + * @config: Offload configuration.
7183 + * For checksum offloads, non-zero value enables the offload
7184 + *
7185 + * Return: '0' on Success; Error code otherwise.
7186 + *
7187 + * @warning Allowed only when DPNI is disabled
7188 + */
7190 +int dpni_set_offload(struct fsl_mc_io *mc_io,
7191 + u32 cmd_flags,
7192 + u16 token,
7193 + enum dpni_offload type,
7194 + u32 config)
7195 +{
7196 + struct mc_command cmd = { 0 };
7197 + struct dpni_cmd_set_offload *cmd_params;
7198 +
7199 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
7200 + cmd_flags,
7201 + token);
7202 + cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
7203 + cmd_params->dpni_offload = type;
7204 + cmd_params->config = cpu_to_le32(config);
7205 +
7206 + return mc_send_command(mc_io, &cmd);
7207 +}
7208 +
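+/**
+ * dpni_get_offload() - Get DPNI offload configuration.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @type:	Type of DPNI offload
+ * @config:	Returned offload configuration.
+ *		For checksum offloads, a non-zero value indicates the offload
+ *		is enabled
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */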
7209 +int dpni_get_offload(struct fsl_mc_io *mc_io,
7210 + u32 cmd_flags,
7211 + u16 token,
7212 + enum dpni_offload type,
7213 + u32 *config)
7214 +{
7215 + struct mc_command cmd = { 0 };
7216 + struct dpni_cmd_get_offload *cmd_params;
7217 + struct dpni_rsp_get_offload *rsp_params;
7218 + int err;
7219 +
7220 + /* prepare command */
7221 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
7222 + cmd_flags,
7223 + token);
7224 + cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
7225 + cmd_params->dpni_offload = type;
7226 +
7227 + /* send command to mc*/
7228 + err = mc_send_command(mc_io, &cmd);
7229 + if (err)
7230 + return err;
7231 +
7232 + /* retrieve response parameters */
7233 + rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
7234 + *config = le32_to_cpu(rsp_params->config);
7235 +
7236 + return 0;
7237 +}
7238 +
7239 +/**
7240 + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
7241 + * for enqueue operations
7242 + * @mc_io: Pointer to MC portal's I/O object
7243 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7244 + * @token: Token of DPNI object
7245 + * @qtype: Type of queue to receive QDID for
7246 + * @qdid: Returned virtual QDID value that should be used as an argument
7247 + * in all enqueue operations
7248 + *
7249 + * Return: '0' on Success; Error code otherwise.
7250 + */
7251 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
7252 + u32 cmd_flags,
7253 + u16 token,
7254 + enum dpni_queue_type qtype,
7255 + u16 *qdid)
7256 +{
7257 + struct mc_command cmd = { 0 };
7258 + struct dpni_cmd_get_qdid *cmd_params;
7259 + struct dpni_rsp_get_qdid *rsp_params;
7260 + int err;
7261 +
7262 + /* prepare command */
7263 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
7264 + cmd_flags,
7265 + token);
7266 + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
7267 + cmd_params->qtype = qtype;
7268 +
7269 + /* send command to mc*/
7270 + err = mc_send_command(mc_io, &cmd);
7271 + if (err)
7272 + return err;
7273 +
7274 + /* retrieve response parameters */
7275 + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
7276 + *qdid = le16_to_cpu(rsp_params->qdid);
7277 +
7278 + return 0;
7279 +}
7280 +
7281 +/**
7282 + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
7283 + * @mc_io: Pointer to MC portal's I/O object
7284 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7285 + * @token: Token of DPNI object
7286 + * @data_offset: Tx data offset (from start of buffer)
7287 + *
7288 + * Return: '0' on Success; Error code otherwise.
7289 + */
7290 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
7291 + u32 cmd_flags,
7292 + u16 token,
7293 + u16 *data_offset)
7294 +{
7295 + struct mc_command cmd = { 0 };
7296 + struct dpni_rsp_get_tx_data_offset *rsp_params;
7297 + int err;
7298 +
7299 + /* prepare command */
7300 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
7301 + cmd_flags,
7302 + token);
7303 +
7304 + /* send command to mc*/
7305 + err = mc_send_command(mc_io, &cmd);
7306 + if (err)
7307 + return err;
7308 +
7309 + /* retrieve response parameters */
7310 + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
7311 + *data_offset = le16_to_cpu(rsp_params->data_offset);
7312 +
7313 + return 0;
7314 +}
7315 +
7316 +/**
7317 + * dpni_set_link_cfg() - set the link configuration.
7318 + * @mc_io: Pointer to MC portal's I/O object
7319 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7320 + * @token: Token of DPNI object
7321 + * @cfg: Link configuration
7322 + *
7323 + * Return: '0' on Success; Error code otherwise.
7324 + */
7325 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
7326 + u32 cmd_flags,
7327 + u16 token,
7328 + const struct dpni_link_cfg *cfg)
7329 +{
7330 + struct mc_command cmd = { 0 };
7331 + struct dpni_cmd_set_link_cfg *cmd_params;
7332 +
7333 + /* prepare command */
7334 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
7335 + cmd_flags,
7336 + token);
7337 + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
7338 + cmd_params->rate = cpu_to_le32(cfg->rate);
7339 + cmd_params->options = cpu_to_le64(cfg->options);
7340 +
7341 + /* send command to mc*/
7342 + return mc_send_command(mc_io, &cmd);
7343 +}
7344 +
7345 +/**
7346 + * dpni_get_link_state() - Return the link state (either up or down)
7347 + * @mc_io: Pointer to MC portal's I/O object
7348 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7349 + * @token: Token of DPNI object
7350 + * @state: Returned link state;
7351 + *
7352 + * Return: '0' on Success; Error code otherwise.
7353 + */
7354 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
7355 + u32 cmd_flags,
7356 + u16 token,
7357 + struct dpni_link_state *state)
7358 +{
7359 + struct mc_command cmd = { 0 };
7360 + struct dpni_rsp_get_link_state *rsp_params;
7361 + int err;
7362 +
7363 + /* prepare command */
7364 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
7365 + cmd_flags,
7366 + token);
7367 +
7368 + /* send command to mc*/
7369 + err = mc_send_command(mc_io, &cmd);
7370 + if (err)
7371 + return err;
7372 +
7373 + /* retrieve response parameters */
7374 + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
7375 + state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
7376 + state->rate = le32_to_cpu(rsp_params->rate);
7377 + state->options = le64_to_cpu(rsp_params->options);
7378 +
7379 + return 0;
7380 +}
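+
+/*
+ * Minimal usage sketch: poll the link after a DPNI_IRQ_EVENT_LINK_CHANGED
+ * notification. Assumes 'mc_io' and an open 'token'.
+ */
+static void example_report_link(struct fsl_mc_io *mc_io, u16 token)
+{
+	struct dpni_link_state state;
+
+	memset(&state, 0, sizeof(state));
+	if (dpni_get_link_state(mc_io, 0, token, &state))
+		return;
+
+	pr_info("link %s, rate %u, options 0x%llx\n",
+		state.up ? "up" : "down", state.rate,
+		(unsigned long long)state.options);
+}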
7381 +
7382 +/**
7383 + * dpni_set_tx_shaping() - Set the transmit shaping
7384 + * @mc_io: Pointer to MC portal's I/O object
7385 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7386 + * @token: Token of DPNI object
7387 + * @tx_shaper: tx shaping configuration
7388 + *
7389 + * Return: '0' on Success; Error code otherwise.
7390 + */
7391 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
7392 + u32 cmd_flags,
7393 + u16 token,
7394 + const struct dpni_tx_shaping_cfg *tx_shaper)
7395 +{
7396 + struct mc_command cmd = { 0 };
7397 + struct dpni_cmd_set_tx_shaping *cmd_params;
7398 +
7399 + /* prepare command */
7400 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
7401 + cmd_flags,
7402 + token);
7403 + cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
7404 + cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size);
7405 + cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit);
7406 +
7407 + /* send command to mc*/
7408 + return mc_send_command(mc_io, &cmd);
7409 +}
7410 +
7411 +/**
7412 + * dpni_set_max_frame_length() - Set the maximum received frame length.
7413 + * @mc_io: Pointer to MC portal's I/O object
7414 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7415 + * @token: Token of DPNI object
7416 + * @max_frame_length: Maximum received frame length (in
7417 + * bytes); frame is discarded if its
7418 + * length exceeds this value
7419 + *
7420 + * Return: '0' on Success; Error code otherwise.
7421 + */
7422 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
7423 + u32 cmd_flags,
7424 + u16 token,
7425 + u16 max_frame_length)
7426 +{
7427 + struct mc_command cmd = { 0 };
7428 + struct dpni_cmd_set_max_frame_length *cmd_params;
7429 +
7430 + /* prepare command */
7431 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
7432 + cmd_flags,
7433 + token);
7434 + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
7435 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
7436 +
7437 + /* send command to mc*/
7438 + return mc_send_command(mc_io, &cmd);
7439 +}
7440 +
7441 +/**
7442 + * dpni_get_max_frame_length() - Get the maximum received frame length.
7443 + * @mc_io: Pointer to MC portal's I/O object
7444 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7445 + * @token: Token of DPNI object
7446 + * @max_frame_length: Maximum received frame length (in
7447 + * bytes); frame is discarded if its
7448 + * length exceeds this value
7449 + *
7450 + * Return: '0' on Success; Error code otherwise.
7451 + */
7452 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
7453 + u32 cmd_flags,
7454 + u16 token,
7455 + u16 *max_frame_length)
7456 +{
7457 + struct mc_command cmd = { 0 };
7458 + struct dpni_rsp_get_max_frame_length *rsp_params;
7459 + int err;
7460 +
7461 + /* prepare command */
7462 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
7463 + cmd_flags,
7464 + token);
7465 +
7466 + /* send command to mc*/
7467 + err = mc_send_command(mc_io, &cmd);
7468 + if (err)
7469 + return err;
7470 +
7471 + /* retrieve response parameters */
7472 + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
7473 + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
7474 +
7475 + return 0;
7476 +}
7477 +
7478 +/**
7479 + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
7480 + * @mc_io: Pointer to MC portal's I/O object
7481 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7482 + * @token: Token of DPNI object
7483 + * @en: Set to '1' to enable; '0' to disable
7484 + *
7485 + * Return: '0' on Success; Error code otherwise.
7486 + */
7487 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
7488 + u32 cmd_flags,
7489 + u16 token,
7490 + int en)
7491 +{
7492 + struct mc_command cmd = { 0 };
7493 + struct dpni_cmd_set_multicast_promisc *cmd_params;
7494 +
7495 + /* prepare command */
7496 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
7497 + cmd_flags,
7498 + token);
7499 + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
7500 + dpni_set_field(cmd_params->enable, ENABLE, en);
7501 +
7502 + /* send command to mc*/
7503 + return mc_send_command(mc_io, &cmd);
7504 +}
7505 +
7506 +/**
7507 + * dpni_get_multicast_promisc() - Get multicast promiscuous mode
7508 + * @mc_io: Pointer to MC portal's I/O object
7509 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7510 + * @token: Token of DPNI object
7511 + * @en: Returns '1' if enabled; '0' otherwise
7512 + *
7513 + * Return: '0' on Success; Error code otherwise.
7514 + */
7515 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
7516 + u32 cmd_flags,
7517 + u16 token,
7518 + int *en)
7519 +{
7520 + struct mc_command cmd = { 0 };
7521 + struct dpni_rsp_get_multicast_promisc *rsp_params;
7522 + int err;
7523 +
7524 + /* prepare command */
7525 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
7526 + cmd_flags,
7527 + token);
7528 +
7529 + /* send command to mc*/
7530 + err = mc_send_command(mc_io, &cmd);
7531 + if (err)
7532 + return err;
7533 +
7534 + /* retrieve response parameters */
7535 + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
7536 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
7537 +
7538 + return 0;
7539 +}
7540 +
7541 +/**
7542 + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
7543 + * @mc_io: Pointer to MC portal's I/O object
7544 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7545 + * @token: Token of DPNI object
7546 + * @en: Set to '1' to enable; '0' to disable
7547 + *
7548 + * Return: '0' on Success; Error code otherwise.
7549 + */
7550 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
7551 + u32 cmd_flags,
7552 + u16 token,
7553 + int en)
7554 +{
7555 + struct mc_command cmd = { 0 };
7556 + struct dpni_cmd_set_unicast_promisc *cmd_params;
7557 +
7558 + /* prepare command */
7559 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
7560 + cmd_flags,
7561 + token);
7562 + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
7563 + dpni_set_field(cmd_params->enable, ENABLE, en);
7564 +
7565 + /* send command to mc*/
7566 + return mc_send_command(mc_io, &cmd);
7567 +}
7568 +
7569 +/**
7570 + * dpni_get_unicast_promisc() - Get unicast promiscuous mode
7571 + * @mc_io: Pointer to MC portal's I/O object
7572 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7573 + * @token: Token of DPNI object
7574 + * @en: Returns '1' if enabled; '0' otherwise
7575 + *
7576 + * Return: '0' on Success; Error code otherwise.
7577 + */
7578 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
7579 + u32 cmd_flags,
7580 + u16 token,
7581 + int *en)
7582 +{
7583 + struct mc_command cmd = { 0 };
7584 + struct dpni_rsp_get_unicast_promisc *rsp_params;
7585 + int err;
7586 +
7587 + /* prepare command */
7588 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
7589 + cmd_flags,
7590 + token);
7591 +
7592 + /* send command to mc*/
7593 + err = mc_send_command(mc_io, &cmd);
7594 + if (err)
7595 + return err;
7596 +
7597 + /* retrieve response parameters */
7598 + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
7599 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
7600 +
7601 + return 0;
7602 +}
7603 +
7604 +/**
7605 + * dpni_set_primary_mac_addr() - Set the primary MAC address
7606 + * @mc_io: Pointer to MC portal's I/O object
7607 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7608 + * @token: Token of DPNI object
7609 + * @mac_addr: MAC address to set as primary address
7610 + *
7611 + * Return: '0' on Success; Error code otherwise.
7612 + */
7613 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
7614 + u32 cmd_flags,
7615 + u16 token,
7616 + const u8 mac_addr[6])
7617 +{
7618 + struct mc_command cmd = { 0 };
7619 + struct dpni_cmd_set_primary_mac_addr *cmd_params;
7620 + int i;
7621 +
7622 + /* prepare command */
7623 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
7624 + cmd_flags,
7625 + token);
7626 + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
7627 + for (i = 0; i < 6; i++)
7628 + cmd_params->mac_addr[i] = mac_addr[5 - i];
7629 +
7630 + /* send command to mc*/
7631 + return mc_send_command(mc_io, &cmd);
7632 +}
7633 +
7634 +/**
7635 + * dpni_get_primary_mac_addr() - Get the primary MAC address
7636 + * @mc_io: Pointer to MC portal's I/O object
7637 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7638 + * @token: Token of DPNI object
7639 + * @mac_addr: Returned MAC address
7640 + *
7641 + * Return: '0' on Success; Error code otherwise.
7642 + */
7643 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
7644 + u32 cmd_flags,
7645 + u16 token,
7646 + u8 mac_addr[6])
7647 +{
7648 + struct mc_command cmd = { 0 };
7649 + struct dpni_rsp_get_primary_mac_addr *rsp_params;
7650 + int i, err;
7651 +
7652 + /* prepare command */
7653 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
7654 + cmd_flags,
7655 + token);
7656 +
7657 + /* send command to mc*/
7658 + err = mc_send_command(mc_io, &cmd);
7659 + if (err)
7660 + return err;
7661 +
7662 + /* retrieve response parameters */
7663 + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
7664 + for (i = 0; i < 6; i++)
7665 + mac_addr[5 - i] = rsp_params->mac_addr[i];
7666 +
7667 + return 0;
7668 +}
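+
+/*
+ * Minimal usage sketch: keep the DPNI primary MAC address in sync with a
+ * caller-provided address. The wrappers above already handle the byte-order
+ * swap against the MC command layout, so 'addr' is passed in normal order.
+ * Assumes 'mc_io', an open 'token' and a 6-byte 'addr'.
+ */
+static int example_sync_mac(struct fsl_mc_io *mc_io, u16 token,
+			    const u8 addr[6])
+{
+	u8 hw_addr[6];
+	int err;
+
+	err = dpni_get_primary_mac_addr(mc_io, 0, token, hw_addr);
+	if (err)
+		return err;
+
+	if (!memcmp(hw_addr, addr, 6))
+		return 0;	/* already up to date */
+
+	return dpni_set_primary_mac_addr(mc_io, 0, token, addr);
+}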
7669 +
7670 +/**
7671 + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
7672 + * port the DPNI is attached to
7673 + * @mc_io: Pointer to MC portal's I/O object
7674 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7675 + * @token: Token of DPNI object
7676 + * @mac_addr: MAC address of the physical port, if any, otherwise 0
7677 + *
7678 + * The primary MAC address is not cleared by this operation.
7679 + *
7680 + * Return: '0' on Success; Error code otherwise.
7681 + */
7682 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
7683 + u32 cmd_flags,
7684 + u16 token,
7685 + u8 mac_addr[6])
7686 +{
7687 + struct mc_command cmd = { 0 };
7688 + struct dpni_rsp_get_port_mac_addr *rsp_params;
7689 + int i, err;
7690 +
7691 + /* prepare command */
7692 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
7693 + cmd_flags,
7694 + token);
7695 +
7696 + /* send command to mc*/
7697 + err = mc_send_command(mc_io, &cmd);
7698 + if (err)
7699 + return err;
7700 +
7701 + /* retrieve response parameters */
7702 + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
7703 + for (i = 0; i < 6; i++)
7704 + mac_addr[5 - i] = rsp_params->mac_addr[i];
7705 +
7706 + return 0;
7707 +}
7708 +
7709 +/**
7710 + * dpni_add_mac_addr() - Add MAC address filter
7711 + * @mc_io: Pointer to MC portal's I/O object
7712 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7713 + * @token: Token of DPNI object
7714 + * @mac_addr: MAC address to add
7715 + *
7716 + * Return: '0' on Success; Error code otherwise.
7717 + */
7718 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
7719 + u32 cmd_flags,
7720 + u16 token,
7721 + const u8 mac_addr[6])
7722 +{
7723 + struct mc_command cmd = { 0 };
7724 + struct dpni_cmd_add_mac_addr *cmd_params;
7725 + int i;
7726 +
7727 + /* prepare command */
7728 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
7729 + cmd_flags,
7730 + token);
7731 + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
7732 + for (i = 0; i < 6; i++)
7733 + cmd_params->mac_addr[i] = mac_addr[5 - i];
7734 +
7735 + /* send command to mc*/
7736 + return mc_send_command(mc_io, &cmd);
7737 +}
7738 +
7739 +/**
7740 + * dpni_remove_mac_addr() - Remove MAC address filter
7741 + * @mc_io: Pointer to MC portal's I/O object
7742 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7743 + * @token: Token of DPNI object
7744 + * @mac_addr: MAC address to remove
7745 + *
7746 + * Return: '0' on Success; Error code otherwise.
7747 + */
7748 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
7749 + u32 cmd_flags,
7750 + u16 token,
7751 + const u8 mac_addr[6])
7752 +{
7753 + struct mc_command cmd = { 0 };
7754 + struct dpni_cmd_remove_mac_addr *cmd_params;
7755 + int i;
7756 +
7757 + /* prepare command */
7758 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
7759 + cmd_flags,
7760 + token);
7761 + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
7762 + for (i = 0; i < 6; i++)
7763 + cmd_params->mac_addr[i] = mac_addr[5 - i];
7764 +
7765 + /* send command to mc*/
7766 + return mc_send_command(mc_io, &cmd);
7767 +}
7768 +
7769 +/**
7770 + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
7771 + * @mc_io: Pointer to MC portal's I/O object
7772 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7773 + * @token: Token of DPNI object
7774 + * @unicast: Set to '1' to clear unicast addresses
7775 + * @multicast: Set to '1' to clear multicast addresses
7776 + *
7777 + * The primary MAC address is not cleared by this operation.
7778 + *
7779 + * Return: '0' on Success; Error code otherwise.
7780 + */
7781 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
7782 + u32 cmd_flags,
7783 + u16 token,
7784 + int unicast,
7785 + int multicast)
7786 +{
7787 + struct mc_command cmd = { 0 };
7788 + struct dpni_cmd_clear_mac_filters *cmd_params;
7789 +
7790 + /* prepare command */
7791 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
7792 + cmd_flags,
7793 + token);
7794 + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
7795 + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
7796 + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
7797 +
7798 + /* send command to mc*/
7799 + return mc_send_command(mc_io, &cmd);
7800 +}
7801 +
7802 +/**
7803 + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
7804 + * @mc_io: Pointer to MC portal's I/O object
7805 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7806 + * @token: Token of DPNI object
7807 + * @tc_id: Traffic class selection (0-7)
7808 + * @cfg: Traffic class distribution configuration
7809 + *
7810 + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
7811 + * first to prepare the key_cfg_iova parameter
7812 + *
7813 + * Return: '0' on Success; error code otherwise.
7814 + */
7815 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
7816 + u32 cmd_flags,
7817 + u16 token,
7818 + u8 tc_id,
7819 + const struct dpni_rx_tc_dist_cfg *cfg)
7820 +{
7821 + struct mc_command cmd = { 0 };
7822 + struct dpni_cmd_set_rx_tc_dist *cmd_params;
7823 +
7824 + /* prepare command */
7825 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
7826 + cmd_flags,
7827 + token);
7828 + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
7829 + cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
7830 + cmd_params->tc_id = tc_id;
7831 + dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
7832 + dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
7833 + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
7834 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
7835 +
7836 + /* send command to mc*/
7837 + return mc_send_command(mc_io, &cmd);
7838 +}
7839 +
7840 +/**
7841 + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
7842 + * (to select a flow ID)
7843 + * @mc_io: Pointer to MC portal's I/O object
7844 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7845 + * @token: Token of DPNI object
7846 + * @tc_id: Traffic class selection (0-7)
7847 + * @index: Location in the flow steering table at which to insert the entry.
7848 + * Only relevant if MASKING is enabled for flow steering
7849 + * classification on this DPNI; it is ignored for exact match.
7850 + * @cfg: Flow steering rule to add
7851 + * @action: Action to be taken as result of a classification hit
7852 + *
7853 + * Return: '0' on Success; Error code otherwise.
7854 + */
7855 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
7856 + u32 cmd_flags,
7857 + u16 token,
7858 + u8 tc_id,
7859 + u16 index,
7860 + const struct dpni_rule_cfg *cfg,
7861 + const struct dpni_fs_action_cfg *action)
7862 +{
7863 + struct dpni_cmd_add_fs_entry *cmd_params;
7864 + struct mc_command cmd = { 0 };
7865 +
7866 + /* prepare command */
7867 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
7868 + cmd_flags,
7869 + token);
7870 + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
7871 + cmd_params->tc_id = tc_id;
7872 + cmd_params->key_size = cfg->key_size;
7873 + cmd_params->index = cpu_to_le16(index);
7874 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
7875 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
7876 + cmd_params->options = cpu_to_le16(action->options);
7877 + cmd_params->flow_id = cpu_to_le16(action->flow_id);
7878 + cmd_params->flc = cpu_to_le64(action->flc);
7879 +
7880 + /* send command to mc*/
7881 + return mc_send_command(mc_io, &cmd);
7882 +}
7883 +
7884 +/**
7885 + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
7886 + * traffic class
7887 + * @mc_io: Pointer to MC portal's I/O object
7888 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7889 + * @token: Token of DPNI object
7890 + * @tc_id: Traffic class selection (0-7)
7891 + * @cfg: Flow steering rule to remove
7892 + *
7893 + * Return: '0' on Success; Error code otherwise.
7894 + */
7895 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
7896 + u32 cmd_flags,
7897 + u16 token,
7898 + u8 tc_id,
7899 + const struct dpni_rule_cfg *cfg)
7900 +{
7901 + struct dpni_cmd_remove_fs_entry *cmd_params;
7902 + struct mc_command cmd = { 0 };
7903 +
7904 + /* prepare command */
7905 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
7906 + cmd_flags,
7907 + token);
7908 + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
7909 + cmd_params->tc_id = tc_id;
7910 + cmd_params->key_size = cfg->key_size;
7911 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
7912 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
7913 +
7914 + /* send command to mc*/
7915 + return mc_send_command(mc_io, &cmd);
7916 +}
7917 +
7918 +/**
7919 + * dpni_set_congestion_notification() - Set traffic class congestion
7920 + * notification configuration
7921 + * @mc_io: Pointer to MC portal's I/O object
7922 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7923 + * @token: Token of DPNI object
7924 + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
7925 + * @tc_id: Traffic class selection (0-7)
7926 + * @cfg: congestion notification configuration
7927 + *
7928 + * Return: '0' on Success; error code otherwise.
7929 + */
7930 +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
7931 + u32 cmd_flags,
7932 + u16 token,
7933 + enum dpni_queue_type qtype,
7934 + u8 tc_id,
7935 + const struct dpni_congestion_notification_cfg *cfg)
7936 +{
7937 + struct dpni_cmd_set_congestion_notification *cmd_params;
7938 + struct mc_command cmd = { 0 };
7939 +
7940 + /* prepare command */
7941 + cmd.header = mc_encode_cmd_header(
7942 + DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
7943 + cmd_flags,
7944 + token);
7945 + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
7946 + cmd_params->qtype = qtype;
7947 + cmd_params->tc = tc_id;
7948 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
7949 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
7950 + cmd_params->dest_priority = cfg->dest_cfg.priority;
7951 + dpni_set_field(cmd_params->type_units, DEST_TYPE,
7952 + cfg->dest_cfg.dest_type);
7953 + dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
7954 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
7955 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
7956 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
7957 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
7958 +
7959 + /* send command to mc*/
7960 + return mc_send_command(mc_io, &cmd);
7961 +}
7962 +
7963 +/**
7964 + * dpni_set_queue() - Set queue parameters
7965 + * @mc_io: Pointer to MC portal's I/O object
7966 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7967 + * @token: Token of DPNI object
7968 + * @qtype: Type of queue - all queue types are supported, although
7969 + * the command is ignored for Tx
7970 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
7971 + * @index: Selects the specific queue out of the set allocated for the
7972 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
7973 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what
7974 + * configuration options are set on the queue
7975 + * @queue: Queue structure
7976 + *
7977 + * Return: '0' on Success; Error code otherwise.
7978 + */
7979 +int dpni_set_queue(struct fsl_mc_io *mc_io,
7980 + u32 cmd_flags,
7981 + u16 token,
7982 + enum dpni_queue_type qtype,
7983 + u8 tc,
7984 + u8 index,
7985 + u8 options,
7986 + const struct dpni_queue *queue)
7987 +{
7988 + struct mc_command cmd = { 0 };
7989 + struct dpni_cmd_set_queue *cmd_params;
7990 +
7991 + /* prepare command */
7992 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
7993 + cmd_flags,
7994 + token);
7995 + cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
7996 + cmd_params->qtype = qtype;
7997 + cmd_params->tc = tc;
7998 + cmd_params->index = index;
7999 + cmd_params->options = options;
8000 + cmd_params->dest_id = cpu_to_le32(queue->destination.id);
8001 + cmd_params->dest_prio = queue->destination.priority;
8002 + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
8003 + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
8004 + dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
8005 + queue->destination.hold_active);
8006 + cmd_params->flc = cpu_to_le64(queue->flc.value);
8007 + cmd_params->user_context = cpu_to_le64(queue->user_context);
8008 +
8009 + /* send command to mc */
8010 + return mc_send_command(mc_io, &cmd);
8011 +}
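+
+/*
+ * Minimal usage sketch: steer one Rx queue to a DPCON channel so frames are
+ * delivered through DPIO notifications. Assumes 'mc_io', an open 'token', a
+ * DPCON object id 'dpcon_id', and that DPNI_QUEUE_OPT_DEST /
+ * DPNI_QUEUE_OPT_USER_CTX are the option flags the caller wants to apply.
+ */
+static int example_bind_rx_queue(struct fsl_mc_io *mc_io, u16 token,
+				 u8 tc, u8 index, int dpcon_id, u64 user_ctx)
+{
+	struct dpni_queue queue;
+
+	memset(&queue, 0, sizeof(queue));
+	queue.destination.type = DPNI_DEST_DPCON;
+	queue.destination.id = dpcon_id;
+	queue.destination.priority = 1;
+	queue.user_context = user_ctx;
+
+	return dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, tc, index,
+			      DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX,
+			      &queue);
+}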
8012 +
8013 +/**
8014 + * dpni_get_queue() - Get queue parameters
8015 + * @mc_io: Pointer to MC portal's I/O object
8016 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8017 + * @token: Token of DPNI object
8018 + * @qtype: Type of queue - all queue types are supported
8019 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
8020 + * @index: Selects the specific queue out of the set allocated for the
8021 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
8022 + * @queue: Queue configuration structure
8023 + * @qid: Queue identification
8024 + *
8025 + * Return: '0' on Success; Error code otherwise.
8026 + */
8027 +int dpni_get_queue(struct fsl_mc_io *mc_io,
8028 + u32 cmd_flags,
8029 + u16 token,
8030 + enum dpni_queue_type qtype,
8031 + u8 tc,
8032 + u8 index,
8033 + struct dpni_queue *queue,
8034 + struct dpni_queue_id *qid)
8035 +{
8036 + struct mc_command cmd = { 0 };
8037 + struct dpni_cmd_get_queue *cmd_params;
8038 + struct dpni_rsp_get_queue *rsp_params;
8039 + int err;
8040 +
8041 + /* prepare command */
8042 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
8043 + cmd_flags,
8044 + token);
8045 + cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
8046 + cmd_params->qtype = qtype;
8047 + cmd_params->tc = tc;
8048 + cmd_params->index = index;
8049 +
8050 + /* send command to mc */
8051 + err = mc_send_command(mc_io, &cmd);
8052 + if (err)
8053 + return err;
8054 +
8055 + /* retrieve response parameters */
8056 + rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
8057 + queue->destination.id = le32_to_cpu(rsp_params->dest_id);
8058 + queue->destination.priority = rsp_params->dest_prio;
8059 + queue->destination.type = dpni_get_field(rsp_params->flags,
8060 + DEST_TYPE);
8061 + queue->flc.stash_control = dpni_get_field(rsp_params->flags,
8062 + STASH_CTRL);
8063 + queue->destination.hold_active = dpni_get_field(rsp_params->flags,
8064 + HOLD_ACTIVE);
8065 + queue->flc.value = le64_to_cpu(rsp_params->flc);
8066 + queue->user_context = le64_to_cpu(rsp_params->user_context);
8067 + qid->fqid = le32_to_cpu(rsp_params->fqid);
8068 + qid->qdbin = le16_to_cpu(rsp_params->qdbin);
8069 +
8070 + return 0;
8071 +}
8072 +
8073 +/**
8074 + * dpni_get_statistics() - Get DPNI statistics
8075 + * @mc_io: Pointer to MC portal's I/O object
8076 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8077 + * @token: Token of DPNI object
8078 + * @page: Selects the statistics page to retrieve, see
8079 + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
8080 + * @stat: Structure containing the statistics
8081 + *
8082 + * Return: '0' on Success; Error code otherwise.
8083 + */
8084 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
8085 + u32 cmd_flags,
8086 + u16 token,
8087 + u8 page,
8088 + union dpni_statistics *stat)
8089 +{
8090 + struct mc_command cmd = { 0 };
8091 + struct dpni_cmd_get_statistics *cmd_params;
8092 + struct dpni_rsp_get_statistics *rsp_params;
8093 + int i, err;
8094 +
8095 + /* prepare command */
8096 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
8097 + cmd_flags,
8098 + token);
8099 + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
8100 + cmd_params->page_number = page;
8101 +
8102 + /* send command to mc */
8103 + err = mc_send_command(mc_io, &cmd);
8104 + if (err)
8105 + return err;
8106 +
8107 + /* retrieve response parameters */
8108 + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
8109 + for (i = 0; i < DPNI_STATISTICS_CNT; i++)
8110 + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
8111 +
8112 + return 0;
8113 +}
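+
+/*
+ * Minimal usage sketch: dump one page of hardware counters through the raw
+ * view of the statistics union. Assumes 'mc_io' and an open 'token'; pages
+ * are numbered 0 to 2 as noted above.
+ */
+static void example_dump_stats(struct fsl_mc_io *mc_io, u16 token, u8 page)
+{
+	union dpni_statistics stats;
+	int i;
+
+	if (dpni_get_statistics(mc_io, 0, token, page, &stats))
+		return;
+
+	for (i = 0; i < DPNI_STATISTICS_CNT; i++)
+		pr_info("page %u, counter %d: %llu\n", page, i,
+			(unsigned long long)stats.raw.counter[i]);
+}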
8114 +
8115 +/**
8116 + * dpni_reset_statistics() - Clears DPNI statistics
8117 + * @mc_io: Pointer to MC portal's I/O object
8118 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8119 + * @token: Token of DPNI object
8120 + *
8121 + * Return: '0' on Success; Error code otherwise.
8122 + */
8123 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8124 + u32 cmd_flags,
8125 + u16 token)
8126 +{
8127 + struct mc_command cmd = { 0 };
8128 +
8129 + /* prepare command */
8130 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
8131 + cmd_flags,
8132 + token);
8133 +
8134 + /* send command to mc*/
8135 + return mc_send_command(mc_io, &cmd);
8136 +}
8137 +
8138 +/**
8139 + * dpni_set_taildrop() - Set taildrop per queue or TC
8140 + * @mc_io: Pointer to MC portal's I/O object
8141 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8142 + * @token: Token of DPNI object
8143 + * @cg_point: Congestion point
8144 + * @qtype: Queue type on which the taildrop is configured.
8145 + * Only Rx queues are supported for now
8146 + * @tc: Traffic class to apply this taildrop to
8147 + * @index: Index of the queue if the DPNI supports multiple queues for
8148 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8149 + * @taildrop: Taildrop structure
8150 + *
8151 + * Return: '0' on Success; Error code otherwise.
8152 + */
8153 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
8154 + u32 cmd_flags,
8155 + u16 token,
8156 + enum dpni_congestion_point cg_point,
8157 + enum dpni_queue_type qtype,
8158 + u8 tc,
8159 + u8 index,
8160 + struct dpni_taildrop *taildrop)
8161 +{
8162 + struct mc_command cmd = { 0 };
8163 + struct dpni_cmd_set_taildrop *cmd_params;
8164 +
8165 + /* prepare command */
8166 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
8167 + cmd_flags,
8168 + token);
8169 + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
8170 + cmd_params->congestion_point = cg_point;
8171 + cmd_params->qtype = qtype;
8172 + cmd_params->tc = tc;
8173 + cmd_params->index = index;
8174 + dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
8175 + cmd_params->units = taildrop->units;
8176 + cmd_params->threshold = cpu_to_le32(taildrop->threshold);
8177 +
8178 + /* send command to mc */
8179 + return mc_send_command(mc_io, &cmd);
8180 +}
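+
+/*
+ * Minimal usage sketch: enable a byte-based taildrop on a single Rx queue.
+ * Assumes 'mc_io', an open 'token', and that DPNI_CP_QUEUE /
+ * DPNI_CONGESTION_UNIT_BYTES are the congestion point and unit selected by
+ * the caller; the threshold value is illustrative.
+ */
+static int example_set_rx_taildrop(struct fsl_mc_io *mc_io, u16 token,
+				   u8 tc, u8 index)
+{
+	struct dpni_taildrop td;
+
+	memset(&td, 0, sizeof(td));
+	td.enable = 1;
+	td.units = DPNI_CONGESTION_UNIT_BYTES;
+	td.threshold = 64 * 1024;
+
+	return dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+				 tc, index, &td);
+}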
8181 +
8182 +/**
8183 + * dpni_get_taildrop() - Get taildrop information
8184 + * @mc_io: Pointer to MC portal's I/O object
8185 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8186 + * @token: Token of DPNI object
8187 + * @cg_point: Congestion point
8188 + * @qtype: Queue type on which the taildrop is configured.
8189 + * Only Rx queues are supported for now
8190 + * @tc: Traffic class to apply this taildrop to
8191 + * @index: Index of the queue if the DPNI supports multiple queues for
8192 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8193 + * @taildrop: Taildrop structure
8194 + *
8195 + * Return: '0' on Success; Error code otherwise.
8196 + */
8197 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
8198 + u32 cmd_flags,
8199 + u16 token,
8200 + enum dpni_congestion_point cg_point,
8201 + enum dpni_queue_type qtype,
8202 + u8 tc,
8203 + u8 index,
8204 + struct dpni_taildrop *taildrop)
8205 +{
8206 + struct mc_command cmd = { 0 };
8207 + struct dpni_cmd_get_taildrop *cmd_params;
8208 + struct dpni_rsp_get_taildrop *rsp_params;
8209 + int err;
8210 +
8211 + /* prepare command */
8212 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
8213 + cmd_flags,
8214 + token);
8215 + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
8216 + cmd_params->congestion_point = cg_point;
8217 + cmd_params->qtype = qtype;
8218 + cmd_params->tc = tc;
8219 + cmd_params->index = index;
8220 +
8221 + /* send command to mc */
8222 + err = mc_send_command(mc_io, &cmd);
8223 + if (err)
8224 + return err;
8225 +
8226 + /* retrieve response parameters */
8227 + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
8228 + taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
8229 + taildrop->units = rsp_params->units;
8230 + taildrop->threshold = le32_to_cpu(rsp_params->threshold);
8231 +
8232 + return 0;
8233 +}
8234 --- /dev/null
8235 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8236 @@ -0,0 +1,989 @@
8237 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
8238 + * Copyright 2016 NXP
8239 + *
8240 + * Redistribution and use in source and binary forms, with or without
8241 + * modification, are permitted provided that the following conditions are met:
8242 + * * Redistributions of source code must retain the above copyright
8243 + * notice, this list of conditions and the following disclaimer.
8244 + * * Redistributions in binary form must reproduce the above copyright
8245 + * notice, this list of conditions and the following disclaimer in the
8246 + * documentation and/or other materials provided with the distribution.
8247 + * * Neither the name of the above-listed copyright holders nor the
8248 + * names of any contributors may be used to endorse or promote products
8249 + * derived from this software without specific prior written permission.
8250 + *
8251 + *
8252 + * ALTERNATIVELY, this software may be distributed under the terms of the
8253 + * GNU General Public License ("GPL") as published by the Free Software
8254 + * Foundation, either version 2 of that License or (at your option) any
8255 + * later version.
8256 + *
8257 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
8258 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8259 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8260 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
8261 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
8262 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
8263 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
8264 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
8265 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
8266 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
8267 + * POSSIBILITY OF SUCH DAMAGE.
8268 + */
8269 +#ifndef __FSL_DPNI_H
8270 +#define __FSL_DPNI_H
8271 +
8272 +#include "dpkg.h"
8273 +
8274 +struct fsl_mc_io;
8275 +
8276 +/**
8277 + * Data Path Network Interface API
8278 + * Contains initialization APIs and runtime control APIs for DPNI
8279 + */
8280 +
8281 +/** General DPNI macros */
8282 +
8283 +/**
8284 + * Maximum number of traffic classes
8285 + */
8286 +#define DPNI_MAX_TC 8
8287 +/**
8288 + * Maximum number of buffer pools per DPNI
8289 + */
8290 +#define DPNI_MAX_DPBP 8
8291 +
8292 +/**
8293 + * All traffic classes considered; see dpni_set_queue()
8294 + */
8295 +#define DPNI_ALL_TCS (u8)(-1)
8296 +/**
8297 + * All flows within traffic class considered; see dpni_set_queue()
8298 + */
8299 +#define DPNI_ALL_TC_FLOWS (u16)(-1)
8300 +/**
8301 + * Generate new flow ID; see dpni_set_queue()
8302 + */
8303 +#define DPNI_NEW_FLOW_ID (u16)(-1)
8304 +
8305 +/**
8306 + * Tx traffic is always released to a buffer pool on transmit; there are no
8307 + * resources allocated to have the frames confirmed back to the source after
8308 + * transmission.
8309 + */
8310 +#define DPNI_OPT_TX_FRM_RELEASE 0x000001
8311 +/**
8312 + * Disables support for MAC address filtering for addresses other than primary
8313 + * MAC address. This affects both unicast and multicast. Promiscuous mode can
8314 + * still be enabled/disabled for both unicast and multicast. If promiscuous mode
8315 + * is disabled, only traffic matching the primary MAC address will be accepted.
8316 + */
8317 +#define DPNI_OPT_NO_MAC_FILTER 0x000002
8318 +/**
8319 + * Allocate policers for this DPNI. They can be used to rate-limit traffic per
8320 + * traffic class (TC) basis.
8321 + */
8322 +#define DPNI_OPT_HAS_POLICING 0x000004
8323 +/**
8324 + * Congestion can be managed in several ways, allowing the buffer pool to
8325 + * deplete on ingress, taildrop on each queue or use congestion groups for sets
8326 + * of queues. If set, it configures a single congestion group across all TCs.
8327 + * If reset, a congestion group is allocated for each TC. Only relevant if the
8328 + * DPNI has multiple traffic classes.
8329 + */
8330 +#define DPNI_OPT_SHARED_CONGESTION 0x000008
8331 +/**
8332 + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
8333 + * look-ups are exact match. Note that TCAM is not available on LS1088 and its
8334 + * variants. Setting this bit on these SoCs will trigger an error.
8335 + */
8336 +#define DPNI_OPT_HAS_KEY_MASKING 0x000010
8337 +/**
8338 + * Disables the flow steering table.
8339 + */
8340 +#define DPNI_OPT_NO_FS 0x000020
8341 +
8342 +int dpni_open(struct fsl_mc_io *mc_io,
8343 + u32 cmd_flags,
8344 + int dpni_id,
8345 + u16 *token);
8346 +
8347 +int dpni_close(struct fsl_mc_io *mc_io,
8348 + u32 cmd_flags,
8349 + u16 token);
8350 +
8351 +/**
8352 + * struct dpni_pools_cfg - Structure representing buffer pools configuration
8353 + * @num_dpbp: Number of DPBPs
8354 + * @pools: Array of buffer pool parameters; the number of valid entries
8355 + * must match the 'num_dpbp' value
8356 + */
8357 +struct dpni_pools_cfg {
8358 + u8 num_dpbp;
8359 + /**
8360 + * struct pools - Buffer pools parameters
8361 + * @dpbp_id: DPBP object ID
8362 + * @buffer_size: Buffer size
8363 + * @backup_pool: Backup pool
8364 + */
8365 + struct {
8366 + int dpbp_id;
8367 + u16 buffer_size;
8368 + int backup_pool;
8369 + } pools[DPNI_MAX_DPBP];
8370 +};
8371 +
8372 +int dpni_set_pools(struct fsl_mc_io *mc_io,
8373 + u32 cmd_flags,
8374 + u16 token,
8375 + const struct dpni_pools_cfg *cfg);
8376 +
8377 +int dpni_enable(struct fsl_mc_io *mc_io,
8378 + u32 cmd_flags,
8379 + u16 token);
8380 +
8381 +int dpni_disable(struct fsl_mc_io *mc_io,
8382 + u32 cmd_flags,
8383 + u16 token);
8384 +
8385 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
8386 + u32 cmd_flags,
8387 + u16 token,
8388 + int *en);
8389 +
8390 +int dpni_reset(struct fsl_mc_io *mc_io,
8391 + u32 cmd_flags,
8392 + u16 token);
8393 +
8394 +/**
8395 + * DPNI IRQ Index and Events
8396 + */
8397 +
8398 +/**
8399 + * IRQ index
8400 + */
8401 +#define DPNI_IRQ_INDEX 0
8402 +/**
8403 + * IRQ event - indicates a change in link state
8404 + */
8405 +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
8406 +
8407 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
8408 + u32 cmd_flags,
8409 + u16 token,
8410 + u8 irq_index,
8411 + u8 en);
8412 +
8413 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
8414 + u32 cmd_flags,
8415 + u16 token,
8416 + u8 irq_index,
8417 + u8 *en);
8418 +
8419 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
8420 + u32 cmd_flags,
8421 + u16 token,
8422 + u8 irq_index,
8423 + u32 mask);
8424 +
8425 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
8426 + u32 cmd_flags,
8427 + u16 token,
8428 + u8 irq_index,
8429 + u32 *mask);
8430 +
8431 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
8432 + u32 cmd_flags,
8433 + u16 token,
8434 + u8 irq_index,
8435 + u32 *status);
8436 +
8437 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
8438 + u32 cmd_flags,
8439 + u16 token,
8440 + u8 irq_index,
8441 + u32 status);
8442 +
8443 +/**
8444 + * struct dpni_attr - Structure representing DPNI attributes
8445 + * @options: Any combination of the following options:
8446 + * DPNI_OPT_TX_FRM_RELEASE
8447 + * DPNI_OPT_NO_MAC_FILTER
8448 + * DPNI_OPT_HAS_POLICING
8449 + * DPNI_OPT_SHARED_CONGESTION
8450 + * DPNI_OPT_HAS_KEY_MASKING
8451 + * DPNI_OPT_NO_FS
8452 + * @num_queues: Number of Tx and Rx queues used for traffic distribution.
8453 + * @num_tcs: Number of traffic classes (TCs) reserved for the DPNI.
8454 + * @mac_filter_entries: Number of entries in the MAC address filtering table.
8455 + * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
8456 + * @qos_entries: Number of entries in the QoS classification table.
8457 + * @fs_entries: Number of entries in the flow steering table.
8458 + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
8459 + * than this when adding QoS entries will result in an error.
8460 + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
8461 + * key larger than this when composing the hash + FS key will
8462 + * result in an error.
8463 + * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
8464 + * on 6, 5, 5 bits respectively.
8465 + */
8466 +struct dpni_attr {
8467 + u32 options;
8468 + u8 num_queues;
8469 + u8 num_tcs;
8470 + u8 mac_filter_entries;
8471 + u8 vlan_filter_entries;
8472 + u8 qos_entries;
8473 + u16 fs_entries;
8474 + u8 qos_key_size;
8475 + u8 fs_key_size;
8476 + u16 wriop_version;
8477 +};
8478 +
8479 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
8480 + u32 cmd_flags,
8481 + u16 token,
8482 + struct dpni_attr *attr);
8483 +
8484 +/**
8485 + * DPNI errors
8486 + */
8487 +
8488 +/**
8489 + * Extract out of frame header error
8490 + */
8491 +#define DPNI_ERROR_EOFHE 0x00020000
8492 +/**
8493 + * Frame length error
8494 + */
8495 +#define DPNI_ERROR_FLE 0x00002000
8496 +/**
8497 + * Frame physical error
8498 + */
8499 +#define DPNI_ERROR_FPE 0x00001000
8500 +/**
8501 + * Parsing header error
8502 + */
8503 +#define DPNI_ERROR_PHE 0x00000020
8504 +/**
8505 + * Parser L3 checksum error
8506 + */
8507 +#define DPNI_ERROR_L3CE 0x00000004
8508 +/**
8509 + * Parser L4 checksum error
8510 + */
8511 +#define DPNI_ERROR_L4CE 0x00000001
8512 +
8513 +/**
8514 + * enum dpni_error_action - Defines DPNI behavior for errors
8515 + * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
8516 + * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
8517 + * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
8518 + */
8519 +enum dpni_error_action {
8520 + DPNI_ERROR_ACTION_DISCARD = 0,
8521 + DPNI_ERROR_ACTION_CONTINUE = 1,
8522 + DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
8523 +};
8524 +
8525 +/**
8526 + * struct dpni_error_cfg - Structure representing DPNI errors treatment
8527 + * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
8528 + * @error_action: The desired action for the errors mask
8529 + * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
8530 + * status (FAS); relevant only for the non-discard action
8531 + */
8532 +struct dpni_error_cfg {
8533 + u32 errors;
8534 + enum dpni_error_action error_action;
8535 + int set_frame_annotation;
8536 +};
8537 +
8538 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
8539 + u32 cmd_flags,
8540 + u16 token,
8541 + struct dpni_error_cfg *cfg);
8542 +
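For illustration (a sketch under the assumption that checksum-error frames should simply be dropped; the helper name is not from the patch):

static int dpni_example_drop_csum_errors(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_error_cfg err_cfg = {
		.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
		.error_action = DPNI_ERROR_ACTION_DISCARD,
		.set_frame_annotation = 0,
	};

	return dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
}
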
8543 +/**
8544 + * DPNI buffer layout modification options
8545 + */
8546 +
8547 +/**
8548 + * Select to modify the time-stamp setting
8549 + */
8550 +#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
8551 +/**
8552 + * Select to modify the parser-result setting; not applicable for Tx
8553 + */
8554 +#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
8555 +/**
8556 + * Select to modify the frame-status setting
8557 + */
8558 +#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
8559 +/**
8560 + * Select to modify the private-data-size setting
8561 + */
8562 +#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
8563 +/**
8564 + * Select to modify the data-alignment setting
8565 + */
8566 +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
8567 +/**
8568 + * Select to modify the data-head-room setting
8569 + */
8570 +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
8571 +/**
8572 + * Select to modify the data-tail-room setting
8573 + */
8574 +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
8575 +
8576 +/**
8577 + * struct dpni_buffer_layout - Structure representing DPNI buffer layout
8578 + * @options: Flags representing the suggested modifications to the buffer
8579 + * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
8580 + * @pass_timestamp: Pass timestamp value
8581 + * @pass_parser_result: Pass parser results
8582 + * @pass_frame_status: Pass frame status
8583 + * @private_data_size: Size kept for private data (in bytes)
8584 + * @data_align: Data alignment
8585 + * @data_head_room: Data head room
8586 + * @data_tail_room: Data tail room
8587 + */
8588 +struct dpni_buffer_layout {
8589 + u32 options;
8590 + int pass_timestamp;
8591 + int pass_parser_result;
8592 + int pass_frame_status;
8593 + u16 private_data_size;
8594 + u16 data_align;
8595 + u16 data_head_room;
8596 + u16 data_tail_room;
8597 +};
8598 +
8599 +/**
8600 + * enum dpni_queue_type - Identifies a type of queue targeted by the command
8601 + * @DPNI_QUEUE_RX: Rx queue
8602 + * @DPNI_QUEUE_TX: Tx queue
8603 + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
8604 + * @DPNI_QUEUE_RX_ERR: Rx error queue
8605 + */
8605 +enum dpni_queue_type {
8606 + DPNI_QUEUE_RX,
8607 + DPNI_QUEUE_TX,
8608 + DPNI_QUEUE_TX_CONFIRM,
8609 + DPNI_QUEUE_RX_ERR,
8610 +};
8611 +
8612 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
8613 + u32 cmd_flags,
8614 + u16 token,
8615 + enum dpni_queue_type qtype,
8616 + struct dpni_buffer_layout *layout);
8617 +
8618 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
8619 + u32 cmd_flags,
8620 + u16 token,
8621 + enum dpni_queue_type qtype,
8622 + const struct dpni_buffer_layout *layout);
8623 +
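A sketch of the read-modify-write pattern the option flags suggest, here requesting Rx timestamps (helper name illustrative):

static int dpni_example_enable_rx_timestamp(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_buffer_layout layout = { 0 };
	int err;

	err = dpni_get_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
	if (err)
		return err;

	layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	layout.pass_timestamp = 1;

	return dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
}
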
8624 +/**
8625 + * enum dpni_offload - Identifies a type of offload targeted by the command
8626 + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
8627 + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
8628 + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
8629 + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
8630 + */
8631 +enum dpni_offload {
8632 + DPNI_OFF_RX_L3_CSUM,
8633 + DPNI_OFF_RX_L4_CSUM,
8634 + DPNI_OFF_TX_L3_CSUM,
8635 + DPNI_OFF_TX_L4_CSUM,
8636 +};
8637 +
8638 +int dpni_set_offload(struct fsl_mc_io *mc_io,
8639 + u32 cmd_flags,
8640 + u16 token,
8641 + enum dpni_offload type,
8642 + u32 config);
8643 +
8644 +int dpni_get_offload(struct fsl_mc_io *mc_io,
8645 + u32 cmd_flags,
8646 + u16 token,
8647 + enum dpni_offload type,
8648 + u32 *config);
8649 +
8650 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
8651 + u32 cmd_flags,
8652 + u16 token,
8653 + enum dpni_queue_type qtype,
8654 + u16 *qdid);
8655 +
8656 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
8657 + u32 cmd_flags,
8658 + u16 token,
8659 + u16 *data_offset);
8660 +
8661 +#define DPNI_STATISTICS_CNT 7
8662 +
8663 +union dpni_statistics {
8664 + /**
8665 + * struct page_0 - Page_0 statistics structure
8666 + * @ingress_all_frames: Ingress frame count
8667 + * @ingress_all_bytes: Ingress byte count
8668 + * @ingress_multicast_frames: Ingress multicast frame count
8669 + * @ingress_multicast_bytes: Ingress multicast byte count
8670 + * @ingress_broadcast_frames: Ingress broadcast frame count
8671 + * @ingress_broadcast_bytes: Ingress broadcast byte count
8672 + */
8673 + struct {
8674 + u64 ingress_all_frames;
8675 + u64 ingress_all_bytes;
8676 + u64 ingress_multicast_frames;
8677 + u64 ingress_multicast_bytes;
8678 + u64 ingress_broadcast_frames;
8679 + u64 ingress_broadcast_bytes;
8680 + } page_0;
8681 + /**
8682 + * struct page_1 - Page_1 statistics structure
8683 + * @egress_all_frames: Egress frame count
8684 + * @egress_all_bytes: Egress byte count
8685 + * @egress_multicast_frames: Egress multicast frame count
8686 + * @egress_multicast_bytes: Egress multicast byte count
8687 + * @egress_broadcast_frames: Egress broadcast frame count
8688 + * @egress_broadcast_bytes: Egress broadcast byte count
8689 + */
8690 + struct {
8691 + u64 egress_all_frames;
8692 + u64 egress_all_bytes;
8693 + u64 egress_multicast_frames;
8694 + u64 egress_multicast_bytes;
8695 + u64 egress_broadcast_frames;
8696 + u64 egress_broadcast_bytes;
8697 + } page_1;
8698 + /**
8699 + * struct page_2 - Page_2 statistics structure
8700 + * @ingress_filtered_frames: Ingress filtered frame count
8701 + * @ingress_discarded_frames: Ingress discarded frame count
8702 + * @ingress_nobuffer_discards: Ingress discarded frame count
8703 + * due to lack of buffers
8704 + * @egress_discarded_frames: Egress discarded frame count
8705 + * @egress_confirmed_frames: Egress confirmed frame count
8706 + */
8707 + struct {
8708 + u64 ingress_filtered_frames;
8709 + u64 ingress_discarded_frames;
8710 + u64 ingress_nobuffer_discards;
8711 + u64 egress_discarded_frames;
8712 + u64 egress_confirmed_frames;
8713 + } page_2;
8714 + /**
8715 + * struct raw - raw statistics structure
8716 + */
8717 + struct {
8718 + u64 counter[DPNI_STATISTICS_CNT];
8719 + } raw;
8720 +};
8721 +
8722 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
8723 + u32 cmd_flags,
8724 + u16 token,
8725 + u8 page,
8726 + union dpni_statistics *stat);
8727 +
8728 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8729 + u32 cmd_flags,
8730 + u16 token);
8731 +
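For example, reading the ingress counters from statistics page 0 could look like this (sketch; the printing and the helper name are illustrative):

static void dpni_example_print_rx_stats(struct fsl_mc_io *mc_io, u16 token)
{
	union dpni_statistics stats;

	if (dpni_get_statistics(mc_io, 0, token, 0, &stats))
		return;

	pr_info("rx frames %llu, rx bytes %llu\n",
		stats.page_0.ingress_all_frames,
		stats.page_0.ingress_all_bytes);
}
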
8732 +/**
8733 + * Enable auto-negotiation
8734 + */
8735 +#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
8736 +/**
8737 + * Enable half-duplex mode
8738 + */
8739 +#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
8740 +/**
8741 + * Enable pause frames
8742 + */
8743 +#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
8744 +/**
8745 + * Enable asymmetric pause frames
8746 + */
8747 +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
8748 +
8749 +/**
8750 + * struct dpni_link_cfg - Structure representing DPNI link configuration
8751 + * @rate: Rate
8752 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
8753 + */
8754 +struct dpni_link_cfg {
8755 + u32 rate;
8756 + u64 options;
8757 +};
8758 +
8759 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
8760 + u32 cmd_flags,
8761 + u16 token,
8762 + const struct dpni_link_cfg *cfg);
8763 +
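A short sketch of combining the 'DPNI_LINK_OPT_<X>' flags (the rate value is a placeholder, not taken from the patch):

static int dpni_example_set_link(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_link_cfg cfg = {
		.rate = 1000,		/* placeholder rate */
		.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE,
	};

	return dpni_set_link_cfg(mc_io, 0, token, &cfg);
}
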
8764 +/**
8765 + * struct dpni_link_state - Structure representing DPNI link state
8766 + * @rate: Rate
8767 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
8768 + * @up: Link state; '0' for down, '1' for up
8769 + */
8770 +struct dpni_link_state {
8771 + u32 rate;
8772 + u64 options;
8773 + int up;
8774 +};
8775 +
8776 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
8777 + u32 cmd_flags,
8778 + u16 token,
8779 + struct dpni_link_state *state);
8780 +
8781 +/**
8782 + * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping configuration
8783 + * @rate_limit: rate in Mbps
8784 + * @max_burst_size: burst size in bytes (up to 64KB)
8785 + */
8786 +struct dpni_tx_shaping_cfg {
8787 + u32 rate_limit;
8788 + u16 max_burst_size;
8789 +};
8790 +
8791 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
8792 + u32 cmd_flags,
8793 + u16 token,
8794 + const struct dpni_tx_shaping_cfg *tx_shaper);
8795 +
8796 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
8797 + u32 cmd_flags,
8798 + u16 token,
8799 + u16 max_frame_length);
8800 +
8801 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
8802 + u32 cmd_flags,
8803 + u16 token,
8804 + u16 *max_frame_length);
8805 +
8806 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
8807 + u32 cmd_flags,
8808 + u16 token,
8809 + int en);
8810 +
8811 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
8812 + u32 cmd_flags,
8813 + u16 token,
8814 + int *en);
8815 +
8816 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
8817 + u32 cmd_flags,
8818 + u16 token,
8819 + int en);
8820 +
8821 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
8822 + u32 cmd_flags,
8823 + u16 token,
8824 + int *en);
8825 +
8826 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
8827 + u32 cmd_flags,
8828 + u16 token,
8829 + const u8 mac_addr[6]);
8830 +
8831 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
8832 + u32 cmd_flags,
8833 + u16 token,
8834 + u8 mac_addr[6]);
8835 +
8836 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
8837 + u32 cm_flags,
8838 + u16 token,
8839 + u8 mac_addr[6]);
8840 +
8841 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
8842 + u32 cmd_flags,
8843 + u16 token,
8844 + const u8 mac_addr[6]);
8845 +
8846 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
8847 + u32 cmd_flags,
8848 + u16 token,
8849 + const u8 mac_addr[6]);
8850 +
8851 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
8852 + u32 cmd_flags,
8853 + u16 token,
8854 + int unicast,
8855 + int multicast);
8856 +
8857 +/**
8858 + * enum dpni_dist_mode - DPNI distribution mode
8859 + * @DPNI_DIST_MODE_NONE: No distribution
8860 + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
8861 + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
8862 + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
8863 + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
8864 + */
8865 +enum dpni_dist_mode {
8866 + DPNI_DIST_MODE_NONE = 0,
8867 + DPNI_DIST_MODE_HASH = 1,
8868 + DPNI_DIST_MODE_FS = 2
8869 +};
8870 +
8871 +/**
8872 + * enum dpni_fs_miss_action - DPNI Flow Steering miss action
8873 + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
8874 + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
8875 + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
8876 + */
8877 +enum dpni_fs_miss_action {
8878 + DPNI_FS_MISS_DROP = 0,
8879 + DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
8880 + DPNI_FS_MISS_HASH = 2
8881 +};
8882 +
8883 +/**
8884 + * struct dpni_fs_tbl_cfg - Flow Steering table configuration
8885 + * @miss_action: Miss action selection
8886 + * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
8887 + */
8888 +struct dpni_fs_tbl_cfg {
8889 + enum dpni_fs_miss_action miss_action;
8890 + u16 default_flow_id;
8891 +};
8892 +
8893 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
8894 + u8 *key_cfg_buf);
8895 +
8896 +/**
8897 + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
8898 + * @dist_size: Set the distribution size;
8899 + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
8900 + * 112,128,192,224,256,384,448,512,768,896,1024
8901 + * @dist_mode: Distribution mode
8902 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
8903 + * the extractions to be used for the distribution key by calling
8904 + *		dpni_prepare_key_cfg(); relevant only when
8905 + * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
8906 + * @fs_cfg: Flow Steering table configuration; only relevant if
8907 + * 'dist_mode = DPNI_DIST_MODE_FS'
8908 + */
8909 +struct dpni_rx_tc_dist_cfg {
8910 + u16 dist_size;
8911 + enum dpni_dist_mode dist_mode;
8912 + u64 key_cfg_iova;
8913 + struct dpni_fs_tbl_cfg fs_cfg;
8914 +};
8915 +
8916 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
8917 + u32 cmd_flags,
8918 + u16 token,
8919 + u8 tc_id,
8920 + const struct dpni_rx_tc_dist_cfg *cfg);
8921 +
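The flow implied by @key_cfg_iova can be sketched as follows (assumptions: <linux/slab.h> and <linux/dma-mapping.h> are available, the caller already built a struct dpkg_profile_cfg as declared in dpkg.h elsewhere in this patch, and the dist_size of 8 is arbitrary):

static int dpni_example_set_hash(struct fsl_mc_io *mc_io, u16 token,
				 struct device *dev,
				 const struct dpkg_profile_cfg *key_cfg)
{
	struct dpni_rx_tc_dist_cfg dist_cfg = { 0 };
	u8 *dma_mem;
	int err;

	dma_mem = kzalloc(256, GFP_KERNEL);	/* 256 bytes per the doc above */
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(key_cfg, dma_mem);
	if (err)
		goto free_mem;

	dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, 256,
					       DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	dist_cfg.dist_size = 8;
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	err = dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist_cfg);
	dma_unmap_single(dev, dist_cfg.key_cfg_iova, 256, DMA_TO_DEVICE);
free_mem:
	kfree(dma_mem);
	return err;
}
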
8922 +/**
8923 + * enum dpni_dest - DPNI destination types
8924 + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
8925 + * does not generate FQDAN notifications; user is expected to
8926 + * dequeue from the queue based on polling or other user-defined
8927 + * method
8928 + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
8929 + * notifications to the specified DPIO; user is expected to dequeue
8930 + * from the queue only after notification is received
8931 + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
8932 + * FQDAN notifications, but is connected to the specified DPCON
8933 + * object; user is expected to dequeue from the DPCON channel
8934 + */
8935 +enum dpni_dest {
8936 + DPNI_DEST_NONE = 0,
8937 + DPNI_DEST_DPIO = 1,
8938 + DPNI_DEST_DPCON = 2
8939 +};
8940 +
8941 +/**
8942 + * struct dpni_queue - Queue structure
8943 + * @user_context: User data, presented to the user along with any frames from
8944 + * this queue. Not relevant for Tx queues.
8945 + */
8946 +struct dpni_queue {
8947 +/**
8948 + * struct destination - Destination structure
8949 + * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
8950 + * Identifies either a DPIO or a DPCON object. Not relevant for
8951 + * Tx queues.
8952 + * @type: May be one of the following:
8953 + * 0 - No destination, queue can be manually queried, but will not
8954 + * push traffic or notifications to a DPIO;
8955 + * 1 - The destination is a DPIO. When traffic becomes available in
8956 + * the queue a FQDAN (FQ data available notification) will be
8957 + * generated to selected DPIO;
8958 + * 2 - The destination is a DPCON. The queue is associated with a
8959 + * DPCON object for the purpose of scheduling between multiple
8960 + * queues. The DPCON may be independently configured to
8961 + * generate notifications. Not relevant for Tx queues.
8962 + * @hold_active: Hold active, maintains a queue scheduled for longer
8963 + * in a DPIO during dequeue to reduce spread of traffic.
8964 + * Only relevant if queues are not affined to a single DPIO.
8965 + */
8966 + struct {
8967 + u16 id;
8968 + enum dpni_dest type;
8969 + char hold_active;
8970 + u8 priority;
8971 + } destination;
8972 + u64 user_context;
8973 + struct {
8974 + u64 value;
8975 + char stash_control;
8976 + } flc;
8977 +};
8978 +
8979 +/**
8980 + * struct dpni_queue_id - Queue identification, used for enqueue commands
8981 + * or queue control
8982 + * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
8983 + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
8984 + * for Tx queues.
8985 + */
8986 +struct dpni_queue_id {
8987 + u32 fqid;
8988 + u16 qdbin;
8989 +};
8990 +
8991 +/**
8992 + * Set User Context
8993 + */
8994 +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
8995 +#define DPNI_QUEUE_OPT_DEST 0x00000002
8996 +#define DPNI_QUEUE_OPT_FLC 0x00000004
8997 +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
8998 +
8999 +int dpni_set_queue(struct fsl_mc_io *mc_io,
9000 + u32 cmd_flags,
9001 + u16 token,
9002 + enum dpni_queue_type qtype,
9003 + u8 tc,
9004 + u8 index,
9005 + u8 options,
9006 + const struct dpni_queue *queue);
9007 +
9008 +int dpni_get_queue(struct fsl_mc_io *mc_io,
9009 + u32 cmd_flags,
9010 + u16 token,
9011 + enum dpni_queue_type qtype,
9012 + u8 tc,
9013 + u8 index,
9014 + struct dpni_queue *queue,
9015 + struct dpni_queue_id *qid);
9016 +
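As an illustration of the option flags (sketch; the DPIO id, priority and helper name are placeholders): point Rx queue 0 of traffic class 0 at a DPIO channel and attach a user context.

static int dpni_example_set_rx_dest(struct fsl_mc_io *mc_io, u16 token,
				    int dpio_id, u64 ctx)
{
	struct dpni_queue queue = { 0 };

	queue.destination.id = dpio_id;
	queue.destination.type = DPNI_DEST_DPIO;
	queue.destination.priority = 1;
	queue.user_context = ctx;

	return dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
			      DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			      &queue);
}
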
9017 +/**
9018 + * enum dpni_congestion_unit - DPNI congestion units
9019 + * @DPNI_CONGESTION_UNIT_BYTES: bytes units
9020 + * @DPNI_CONGESTION_UNIT_FRAMES: frames units
9021 + */
9022 +enum dpni_congestion_unit {
9023 + DPNI_CONGESTION_UNIT_BYTES = 0,
9024 + DPNI_CONGESTION_UNIT_FRAMES
9025 +};
9026 +
9027 +/**
9028 + * enum dpni_congestion_point - Structure representing congestion point
9029 + * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
9030 + * QUEUE_INDEX
9031 + * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
9032 + * define the DPNI this can be either per TC (default) or per
9033 + * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
9034 + * QUEUE_INDEX is ignored if this type is used.
9035 + */
9036 +enum dpni_congestion_point {
9037 + DPNI_CP_QUEUE,
9038 + DPNI_CP_GROUP,
9039 +};
9040 +
9041 +/**
9042 + * struct dpni_dest_cfg - Structure representing DPNI destination parameters
9043 + * @dest_type: Destination type
9044 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
9045 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
9046 + * are 0-1 or 0-7, depending on the number of priorities in that
9047 + * channel; not relevant for 'DPNI_DEST_NONE' option
9048 + */
9049 +struct dpni_dest_cfg {
9050 + enum dpni_dest dest_type;
9051 + int dest_id;
9052 + u8 priority;
9053 +};
9054 +
9055 +/* DPNI congestion options */
9056 +
9057 +/**
9058 + * CSCN message is written to message_iova once entering a
9059 + * congestion state (see 'threshold_entry')
9060 + */
9061 +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
9062 +/**
9063 + * CSCN message is written to message_iova once exiting a
9064 + * congestion state (see 'threshold_exit')
9065 + */
9066 +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
9067 +/**
9068 + * CSCN write will attempt to allocate into a cache (coherent write);
9069 + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
9070 + */
9071 +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
9072 +/**
9073 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
9074 + * DPIO/DPCON's WQ channel once entering a congestion state
9075 + * (see 'threshold_entry')
9076 + */
9077 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
9078 +/**
9079 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
9080 + * DPIO/DPCON's WQ channel once exiting a congestion state
9081 + * (see 'threshold_exit')
9082 + */
9083 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
9084 +/**
9085 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
9086 + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
9087 + */
9088 +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
9089 +
9090 +/**
9091 + * struct dpni_congestion_notification_cfg - congestion notification
9092 + * configuration
9093 + * @units: units type
9094 + * @threshold_entry: Above this threshold we enter a congestion state;
9095 + *	set it to '0' to disable it
9096 + * @threshold_exit: below this threshold we exit the congestion state.
9097 + * @message_ctx: The context that will be part of the CSCN message
9098 + * @message_iova: I/O virtual address (must be in DMA-able memory),
9099 + * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
9100 + * contained in 'options'
9101 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
9102 + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
9103 + */
9104 +
9105 +struct dpni_congestion_notification_cfg {
9106 + enum dpni_congestion_unit units;
9107 + u32 threshold_entry;
9108 + u32 threshold_exit;
9109 + u64 message_ctx;
9110 + u64 message_iova;
9111 + struct dpni_dest_cfg dest_cfg;
9112 + u16 notification_mode;
9113 +};
9114 +
9115 +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
9116 + u32 cmd_flags,
9117 + u16 token,
9118 + enum dpni_queue_type qtype,
9119 + u8 tc_id,
9120 + const struct dpni_congestion_notification_cfg *cfg);
9121 +
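A sketch of a write-to-memory notification setup (the thresholds are placeholders; cscn_iova is assumed to be 16-byte-aligned DMA-able memory provided by the caller):

static int dpni_example_set_tx_congestion(struct fsl_mc_io *mc_io, u16 token,
					  u64 cscn_iova)
{
	struct dpni_congestion_notification_cfg cfg = { 0 };

	cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
	cfg.threshold_entry = 256;	/* placeholder thresholds */
	cfg.threshold_exit = 128;
	cfg.message_iova = cscn_iova;
	cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
				DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;

	return dpni_set_congestion_notification(mc_io, 0, token,
						DPNI_QUEUE_TX, 0, &cfg);
}
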
9122 +/**
9123 + * struct dpni_taildrop - Structure representing the taildrop
9124 + * @enable: Indicates whether the taildrop is active or not.
9125 + * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
9126 + *	byte units; this field is ignored and assumed = 0 if
9127 + * CONGESTION_POINT is 0.
9128 + * @threshold: Threshold value, in units identified by UNITS field. Value 0
9129 + * cannot be used as a valid taildrop threshold, THRESHOLD must
9130 + * be > 0 if the taildrop is enabled.
9131 + */
9132 +struct dpni_taildrop {
9133 + char enable;
9134 + enum dpni_congestion_unit units;
9135 + u32 threshold;
9136 +};
9137 +
9138 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
9139 + u32 cmd_flags,
9140 + u16 token,
9141 + enum dpni_congestion_point cg_point,
9142 + enum dpni_queue_type q_type,
9143 + u8 tc,
9144 + u8 q_index,
9145 + struct dpni_taildrop *taildrop);
9146 +
9147 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
9148 + u32 cmd_flags,
9149 + u16 token,
9150 + enum dpni_congestion_point cg_point,
9151 + enum dpni_queue_type q_type,
9152 + u8 tc,
9153 + u8 q_index,
9154 + struct dpni_taildrop *taildrop);
9155 +
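For instance, enabling a per-queue byte taildrop on Rx queue 0 of TC 0 might look like this (the threshold value and helper name are illustrative):

static int dpni_example_set_rx_taildrop(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_taildrop td = {
		.enable = 1,
		.units = DPNI_CONGESTION_UNIT_BYTES,
		.threshold = 64 * 1024,		/* placeholder threshold */
	};

	return dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
				 DPNI_QUEUE_RX, 0, 0, &td);
}
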
9156 +/**
9157 + * struct dpni_rule_cfg - Rule configuration for table lookup
9158 + * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
9159 + * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
9160 + * @key_size: key and mask size (in bytes)
9161 + */
9162 +struct dpni_rule_cfg {
9163 + u64 key_iova;
9164 + u64 mask_iova;
9165 + u8 key_size;
9166 +};
9167 +
9168 +/**
9169 + * Discard matching traffic. If set, this takes precedence over any other
9170 + * configuration and matching traffic is always discarded.
9171 + */
9172 +#define DPNI_FS_OPT_DISCARD            0x1
9173 +
9174 +/**
9175 + * Set FLC value. If set, the flc member of struct dpni_fs_action_cfg is used to
9176 + * override the FLC value set per queue.
9177 + * For more details check the Frame Descriptor section in the hardware
9178 + * documentation.
9179 + */
9180 +#define DPNI_FS_OPT_SET_FLC 0x2
9181 +
9182 +/*
9183 + * Indicates whether the 6 lowest significant bits of FLC are used for stash
9184 + * control. If set, the 6 least significant bits in value are interpreted as
9185 + * follows:
9186 + * - bits 0-1: indicates the number of 64 byte units of context that are
9187 + * stashed. FLC value is interpreted as a memory address in this case,
9188 + * excluding the 6 LS bits.
9189 + * - bits 2-3: indicates the number of 64 byte units of frame annotation
9190 + * to be stashed. Annotation is placed at FD[ADDR].
9191 + * - bits 4-5: indicates the number of 64 byte units of frame data to be
9192 + * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
9193 + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
9194 + */
9195 +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
9196 +
9197 +/**
9198 + * struct dpni_fs_action_cfg - Action configuration for table look-up
9199 + * @flc: FLC value for traffic matching this rule. Please check the Frame
9200 + * Descriptor section in the hardware documentation for more information.
9201 + * @flow_id: Identifies the Rx queue used for matching traffic. Supported
9202 + *	values are in range 0 to num_queues-1.
9203 + * @options: Any combination of DPNI_FS_OPT_ values.
9204 + */
9205 +struct dpni_fs_action_cfg {
9206 + u64 flc;
9207 + u16 flow_id;
9208 + u16 options;
9209 +};
9210 +
9211 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
9212 + u32 cmd_flags,
9213 + u16 token,
9214 + u8 tc_id,
9215 + u16 index,
9216 + const struct dpni_rule_cfg *cfg,
9217 + const struct dpni_fs_action_cfg *action);
9218 +
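A sketch of adding a steering rule (the key/mask buffers are assumed to have been prepared and DMA-mapped by the caller; tc_id 0 and index 0 are arbitrary, and the helper name is invented):

static int dpni_example_steer(struct fsl_mc_io *mc_io, u16 token,
			      u64 key_iova, u64 mask_iova, u8 key_size,
			      u16 rx_queue)
{
	struct dpni_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};
	struct dpni_fs_action_cfg action = {
		.flow_id = rx_queue,
		.options = 0,
	};

	return dpni_add_fs_entry(mc_io, 0, token, 0, 0, &rule, &action);
}
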
9219 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
9220 + u32 cmd_flags,
9221 + u16 token,
9222 + u8 tc_id,
9223 + const struct dpni_rule_cfg *cfg);
9224 +
9225 +#endif /* __FSL_DPNI_H */
9226 --- /dev/null
9227 +++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
9228 @@ -0,0 +1,480 @@
9229 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
9230 + *
9231 + * Redistribution and use in source and binary forms, with or without
9232 + * modification, are permitted provided that the following conditions are met:
9233 + * * Redistributions of source code must retain the above copyright
9234 + * notice, this list of conditions and the following disclaimer.
9235 + * * Redistributions in binary form must reproduce the above copyright
9236 + * notice, this list of conditions and the following disclaimer in the
9237 + * documentation and/or other materials provided with the distribution.
9238 + * * Neither the name of the above-listed copyright holders nor the
9239 + * names of any contributors may be used to endorse or promote products
9240 + * derived from this software without specific prior written permission.
9241 + *
9242 + *
9243 + * ALTERNATIVELY, this software may be distributed under the terms of the
9244 + * GNU General Public License ("GPL") as published by the Free Software
9245 + * Foundation, either version 2 of that License or (at your option) any
9246 + * later version.
9247 + *
9248 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9249 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9250 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9251 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9252 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9253 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9254 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9255 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9256 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9257 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9258 + * POSSIBILITY OF SUCH DAMAGE.
9259 + */
9260 +#ifndef __FSL_NET_H
9261 +#define __FSL_NET_H
9262 +
9263 +#define LAST_HDR_INDEX 0xFFFFFFFF
9264 +
9265 +/*****************************************************************************/
9266 +/* Protocol fields */
9267 +/*****************************************************************************/
9268 +
9269 +/************************* Ethernet fields *********************************/
9270 +#define NH_FLD_ETH_DA (1)
9271 +#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
9272 +#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
9273 +#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
9274 +#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
9275 +#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
9276 +#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
9277 +
9278 +#define NH_FLD_ETH_ADDR_SIZE 6
9279 +
9280 +/*************************** VLAN fields ***********************************/
9281 +#define NH_FLD_VLAN_VPRI (1)
9282 +#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
9283 +#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
9284 +#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
9285 +#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
9286 +#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
9287 +
9288 +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
9289 + NH_FLD_VLAN_CFI | \
9290 + NH_FLD_VLAN_VID)
9291 +
9292 +/************************ IP (generic) fields ******************************/
9293 +#define NH_FLD_IP_VER (1)
9294 +#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
9295 +#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
9296 +#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
9297 +#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
9298 +#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
9299 +#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
9300 +#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
9301 +#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
9302 +
9303 +#define NH_FLD_IP_PROTO_SIZE 1
9304 +
9305 +/***************************** IPV4 fields *********************************/
9306 +#define NH_FLD_IPV4_VER (1)
9307 +#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
9308 +#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
9309 +#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
9310 +#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
9311 +#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
9312 +#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
9313 +#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
9314 +#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
9315 +#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
9316 +#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
9317 +#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
9318 +#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
9319 +#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
9320 +#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
9321 +#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
9322 +
9323 +#define NH_FLD_IPV4_ADDR_SIZE 4
9324 +#define NH_FLD_IPV4_PROTO_SIZE 1
9325 +
9326 +/***************************** IPV6 fields *********************************/
9327 +#define NH_FLD_IPV6_VER (1)
9328 +#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
9329 +#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
9330 +#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
9331 +#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
9332 +#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
9333 +#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
9334 +#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
9335 +#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
9336 +
9337 +#define NH_FLD_IPV6_ADDR_SIZE 16
9338 +#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
9339 +
9340 +/***************************** ICMP fields *********************************/
9341 +#define NH_FLD_ICMP_TYPE (1)
9342 +#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
9343 +#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
9344 +#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
9345 +#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
9346 +#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
9347 +
9348 +#define NH_FLD_ICMP_CODE_SIZE 1
9349 +#define NH_FLD_ICMP_TYPE_SIZE 1
9350 +
9351 +/***************************** IGMP fields *********************************/
9352 +#define NH_FLD_IGMP_VERSION (1)
9353 +#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
9354 +#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
9355 +#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
9356 +#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
9357 +
9358 +/***************************** TCP fields **********************************/
9359 +#define NH_FLD_TCP_PORT_SRC (1)
9360 +#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
9361 +#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
9362 +#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
9363 +#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
9364 +#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
9365 +#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
9366 +#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
9367 +#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
9368 +#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
9369 +#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
9370 +#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
9371 +
9372 +#define NH_FLD_TCP_PORT_SIZE 2
9373 +
9374 +/***************************** UDP fields **********************************/
9375 +#define NH_FLD_UDP_PORT_SRC (1)
9376 +#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
9377 +#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
9378 +#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
9379 +#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
9380 +
9381 +#define NH_FLD_UDP_PORT_SIZE 2
9382 +
9383 +/*************************** UDP-lite fields *******************************/
9384 +#define NH_FLD_UDP_LITE_PORT_SRC (1)
9385 +#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
9386 +#define NH_FLD_UDP_LITE_ALL_FIELDS \
9387 + ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
9388 +
9389 +#define NH_FLD_UDP_LITE_PORT_SIZE 2
9390 +
9391 +/*************************** UDP-encap-ESP fields **************************/
9392 +#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
9393 +#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
9394 +#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
9395 +#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
9396 +#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
9397 +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
9398 +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
9399 + ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
9400 +
9401 +#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
9402 +#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
9403 +
9404 +/***************************** SCTP fields *********************************/
9405 +#define NH_FLD_SCTP_PORT_SRC (1)
9406 +#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
9407 +#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
9408 +#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
9409 +#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
9410 +
9411 +#define NH_FLD_SCTP_PORT_SIZE 2
9412 +
9413 +/***************************** DCCP fields *********************************/
9414 +#define NH_FLD_DCCP_PORT_SRC (1)
9415 +#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
9416 +#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
9417 +
9418 +#define NH_FLD_DCCP_PORT_SIZE 2
9419 +
9420 +/***************************** IPHC fields *********************************/
9421 +#define NH_FLD_IPHC_CID (1)
9422 +#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
9423 +#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
9424 +#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
9425 +#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
9426 +#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
9427 +
9428 +/***************************** SCTP fields *********************************/
9429 +#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
9430 +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
9431 +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
9432 +#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
9433 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
9434 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
9435 +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
9436 +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
9437 +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
9438 +#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
9439 +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
9440 + ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
9441 +
9442 +/*************************** L2TPV2 fields *********************************/
9443 +#define NH_FLD_L2TPV2_TYPE_BIT (1)
9444 +#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
9445 +#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
9446 +#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
9447 +#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
9448 +#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
9449 +#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
9450 +#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
9451 +#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
9452 +#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
9453 +#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
9454 +#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
9455 +#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
9456 +#define NH_FLD_L2TPV2_ALL_FIELDS \
9457 + ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
9458 +
9459 +/*************************** L2TPV3 fields *********************************/
9460 +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
9461 +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
9462 +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
9463 +#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
9464 +#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
9465 +#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
9466 +#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
9467 +#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
9468 +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
9469 +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
9470 + ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
9471 +
9472 +#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
9473 +#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
9474 +#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
9475 +#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
9476 +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
9477 + ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
9478 +
9479 +/**************************** PPP fields ***********************************/
9480 +#define NH_FLD_PPP_PID (1)
9481 +#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
9482 +#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
9483 +
9484 +/************************** PPPoE fields ***********************************/
9485 +#define NH_FLD_PPPOE_VER (1)
9486 +#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
9487 +#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
9488 +#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
9489 +#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
9490 +#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
9491 +#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
9492 +#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
9493 +
9494 +/************************* PPP-Mux fields **********************************/
9495 +#define NH_FLD_PPPMUX_PID (1)
9496 +#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
9497 +#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
9498 +#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
9499 +
9500 +/*********************** PPP-Mux sub-frame fields **************************/
9501 +#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
9502 +#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
9503 +#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
9504 +#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
9505 +#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
9506 +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
9507 + ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
9508 +
9509 +/*************************** LLC fields ************************************/
9510 +#define NH_FLD_LLC_DSAP (1)
9511 +#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
9512 +#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
9513 +#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
9514 +
9515 +/*************************** NLPID fields **********************************/
9516 +#define NH_FLD_NLPID_NLPID (1)
9517 +#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
9518 +
9519 +/*************************** SNAP fields ***********************************/
9520 +#define NH_FLD_SNAP_OUI (1)
9521 +#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
9522 +#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
9523 +
9524 +/*************************** LLC SNAP fields *******************************/
9525 +#define NH_FLD_LLC_SNAP_TYPE (1)
9526 +#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
9527 +
9528 +#define NH_FLD_ARP_HTYPE (1)
9529 +#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
9530 +#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
9531 +#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
9532 +#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
9533 +#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
9534 +#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
9535 +#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
9536 +#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
9537 +#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
9538 +
9539 +/*************************** RFC2684 fields ********************************/
9540 +#define NH_FLD_RFC2684_LLC (1)
9541 +#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
9542 +#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
9543 +#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
9544 +#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
9545 +#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
9546 +#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
9547 +
9548 +/*************************** User defined fields ***************************/
9549 +#define NH_FLD_USER_DEFINED_SRCPORT (1)
9550 +#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
9551 +#define NH_FLD_USER_DEFINED_ALL_FIELDS \
9552 + ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
9553 +
9554 +/*************************** Payload fields ********************************/
9555 +#define NH_FLD_PAYLOAD_BUFFER (1)
9556 +#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
9557 +#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
9558 +#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
9559 +#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
9560 +#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
9561 +#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
9562 +
9563 +/*************************** GRE fields ************************************/
9564 +#define NH_FLD_GRE_TYPE (1)
9565 +#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
9566 +
9567 +/*************************** MINENCAP fields *******************************/
9568 +#define NH_FLD_MINENCAP_SRC_IP (1)
9569 +#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
9570 +#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
9571 +#define NH_FLD_MINENCAP_ALL_FIELDS \
9572 + ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
9573 +
9574 +/*************************** IPSEC AH fields *******************************/
9575 +#define NH_FLD_IPSEC_AH_SPI (1)
9576 +#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
9577 +#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
9578 +
9579 +/*************************** IPSEC ESP fields ******************************/
9580 +#define NH_FLD_IPSEC_ESP_SPI (1)
9581 +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
9582 +#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
9583 +
9584 +#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
9585 +
9586 +/*************************** MPLS fields ***********************************/
9587 +#define NH_FLD_MPLS_LABEL_STACK (1)
9588 +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
9589 + ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
9590 +
9591 +/*************************** MACSEC fields *********************************/
9592 +#define NH_FLD_MACSEC_SECTAG (1)
9593 +#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
9594 +
9595 +/*************************** GTP fields ************************************/
9596 +#define NH_FLD_GTP_TEID (1)
9597 +
9598 +/* Protocol options */
9599 +
9600 +/* Ethernet options */
9601 +#define NH_OPT_ETH_BROADCAST 1
9602 +#define NH_OPT_ETH_MULTICAST 2
9603 +#define NH_OPT_ETH_UNICAST 3
9604 +#define NH_OPT_ETH_BPDU 4
9605 +
9606 +#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
9607 +/* also applicable for broadcast */
9608 +
9609 +/* VLAN options */
9610 +#define NH_OPT_VLAN_CFI 1
9611 +
9612 +/* IPV4 options */
9613 +#define NH_OPT_IPV4_UNICAST 1
9614 +#define NH_OPT_IPV4_MULTICAST 2
9615 +#define NH_OPT_IPV4_BROADCAST 3
9616 +#define NH_OPT_IPV4_OPTION 4
9617 +#define NH_OPT_IPV4_FRAG 5
9618 +#define NH_OPT_IPV4_INITIAL_FRAG 6
9619 +
9620 +/* IPV6 options */
9621 +#define NH_OPT_IPV6_UNICAST 1
9622 +#define NH_OPT_IPV6_MULTICAST 2
9623 +#define NH_OPT_IPV6_OPTION 3
9624 +#define NH_OPT_IPV6_FRAG 4
9625 +#define NH_OPT_IPV6_INITIAL_FRAG 5
9626 +
9627 +/* General IP options (may be used for any version) */
9628 +#define NH_OPT_IP_FRAG 1
9629 +#define NH_OPT_IP_INITIAL_FRAG 2
9630 +#define NH_OPT_IP_OPTION 3
9631 +
9632 +/* Minenc. options */
9633 +#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
9634 +
9635 +/* GRE. options */
9636 +#define NH_OPT_GRE_ROUTING_PRESENT 1
9637 +
9638 +/* TCP options */
9639 +#define NH_OPT_TCP_OPTIONS 1
9640 +#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
9641 +#define NH_OPT_TCP_CONTROL_LOW_BITS 3
9642 +
9643 +/* CAPWAP options */
9644 +#define NH_OPT_CAPWAP_DTLS 1
9645 +
9646 +enum net_prot {
9647 + NET_PROT_NONE = 0,
9648 + NET_PROT_PAYLOAD,
9649 + NET_PROT_ETH,
9650 + NET_PROT_VLAN,
9651 + NET_PROT_IPV4,
9652 + NET_PROT_IPV6,
9653 + NET_PROT_IP,
9654 + NET_PROT_TCP,
9655 + NET_PROT_UDP,
9656 + NET_PROT_UDP_LITE,
9657 + NET_PROT_IPHC,
9658 + NET_PROT_SCTP,
9659 + NET_PROT_SCTP_CHUNK_DATA,
9660 + NET_PROT_PPPOE,
9661 + NET_PROT_PPP,
9662 + NET_PROT_PPPMUX,
9663 + NET_PROT_PPPMUX_SUBFRM,
9664 + NET_PROT_L2TPV2,
9665 + NET_PROT_L2TPV3_CTRL,
9666 + NET_PROT_L2TPV3_SESS,
9667 + NET_PROT_LLC,
9668 + NET_PROT_LLC_SNAP,
9669 + NET_PROT_NLPID,
9670 + NET_PROT_SNAP,
9671 + NET_PROT_MPLS,
9672 + NET_PROT_IPSEC_AH,
9673 + NET_PROT_IPSEC_ESP,
9674 + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
9675 + NET_PROT_MACSEC,
9676 + NET_PROT_GRE,
9677 + NET_PROT_MINENCAP,
9678 + NET_PROT_DCCP,
9679 + NET_PROT_ICMP,
9680 + NET_PROT_IGMP,
9681 + NET_PROT_ARP,
9682 + NET_PROT_CAPWAP_DATA,
9683 + NET_PROT_CAPWAP_CTRL,
9684 + NET_PROT_RFC2684,
9685 + NET_PROT_ICMPV6,
9686 + NET_PROT_FCOE,
9687 + NET_PROT_FIP,
9688 + NET_PROT_ISCSI,
9689 + NET_PROT_GTP,
9690 + NET_PROT_USER_DEFINED_L2,
9691 + NET_PROT_USER_DEFINED_L3,
9692 + NET_PROT_USER_DEFINED_L4,
9693 + NET_PROT_USER_DEFINED_L5,
9694 + NET_PROT_USER_DEFINED_SHIM1,
9695 + NET_PROT_USER_DEFINED_SHIM2,
9696 +
9697 + NET_PROT_DUMMY_LAST
9698 +};
9699 +
9700 +/*! IEEE8021.Q */
9701 +#define NH_IEEE8021Q_ETYPE 0x8100
9702 +#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
9703 + ((((u32)((etype) & 0xFFFF)) << 16) | \
9704 + (((u32)((pcp) & 0x07)) << 13) | \
9705 + (((u32)((dei) & 0x01)) << 12) | \
9706 + (((u32)((vlan_id) & 0xFFF))))
9707 +
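A worked value for the macro above (not from the patch): TPID 0x8100, PCP 5, DEI 0 and VLAN ID 100 pack into 0x8100A064.

u32 tag = NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 5, 0, 100);	/* 0x8100A064 */
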
9708 +#endif /* __FSL_NET_H */
9709 --- /dev/null
9710 +++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
9711 @@ -0,0 +1,6 @@
9712 +config FSL_DPAA2_ETHSW
9713 + tristate "DPAA2 Ethernet Switch"
9714 + depends on FSL_MC_BUS && FSL_DPAA2
9715 + default y
9716 + ---help---
9717 + Prototype driver for DPAA2 Ethernet Switch.
9718 --- /dev/null
9719 +++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
9720 @@ -0,0 +1,10 @@
9721 +
9722 +obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
9723 +
9724 +dpaa2-ethsw-objs := switch.o dpsw.o
9725 +
9726 +all:
9727 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
9728 +
9729 +clean:
9730 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
9731 --- /dev/null
9732 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
9733 @@ -0,0 +1,851 @@
9734 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
9735 + *
9736 + * Redistribution and use in source and binary forms, with or without
9737 + * modification, are permitted provided that the following conditions are met:
9738 + * * Redistributions of source code must retain the above copyright
9739 + * notice, this list of conditions and the following disclaimer.
9740 + * * Redistributions in binary form must reproduce the above copyright
9741 + * notice, this list of conditions and the following disclaimer in the
9742 + * documentation and/or other materials provided with the distribution.
9743 + * * Neither the name of the above-listed copyright holders nor the
9744 + * names of any contributors may be used to endorse or promote products
9745 + * derived from this software without specific prior written permission.
9746 + *
9747 + *
9748 + * ALTERNATIVELY, this software may be distributed under the terms of the
9749 + * GNU General Public License ("GPL") as published by the Free Software
9750 + * Foundation, either version 2 of that License or (at your option) any
9751 + * later version.
9752 + *
9753 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9754 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9755 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9756 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9757 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9758 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9759 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9760 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9761 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9762 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9763 + * POSSIBILITY OF SUCH DAMAGE.
9764 + */
9765 +#ifndef __FSL_DPSW_CMD_H
9766 +#define __FSL_DPSW_CMD_H
9767 +
9768 +/* DPSW Version */
9769 +#define DPSW_VER_MAJOR 8
9770 +#define DPSW_VER_MINOR 0
9771 +
9772 +#define DPSW_CMD_BASE_VERSION 1
9773 +#define DPSW_CMD_ID_OFFSET 4
9774 +
9775 +#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
9776 +
9777 +/* Command IDs */
9778 +#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
9779 +#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
9780 +
9781 +#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
9782 +
9783 +#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
9784 +#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
9785 +#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
9786 +#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
9787 +#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006)
9788 +
9789 +#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010)
9790 +#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011)
9791 +#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
9792 +#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013)
9793 +#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
9794 +#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015)
9795 +#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
9796 +#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
9797 +
9798 +#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
9799 +
9800 +#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024)
9801 +
9802 +#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026)
9803 +
9804 +#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
9805 +#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
9806 +#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032)
9807 +#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033)
9808 +#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
9809 +#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035)
9810 +#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036)
9811 +#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
9812 +#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
9813 +#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039)
9814 +#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A)
9815 +#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B)
9816 +
9817 +#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
9818 +#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
9819 +
9820 +#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
9821 +
9822 +#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
9823 +#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x045)
9824 +#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
9825 +#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
9826 +#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
9827 +#define DPSW_CMDID_IF_SET_MULTICAST DPSW_CMD_ID(0x049)
9828 +#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
9829 +
9830 +#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
9831 +
9832 +#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
9833 +#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
9834 +#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
9835 +#define DPSW_CMDID_VLAN_ADD_IF_FLOODING DPSW_CMD_ID(0x063)
9836 +#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
9837 +#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
9838 +#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
9839 +#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
9840 +#define DPSW_CMDID_VLAN_GET_IF DPSW_CMD_ID(0x068)
9841 +#define DPSW_CMDID_VLAN_GET_IF_FLOODING DPSW_CMD_ID(0x069)
9842 +#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED DPSW_CMD_ID(0x06A)
9843 +#define DPSW_CMDID_VLAN_GET_ATTRIBUTES DPSW_CMD_ID(0x06B)
9844 +
9845 +#define DPSW_CMDID_FDB_GET_MULTICAST DPSW_CMD_ID(0x080)
9846 +#define DPSW_CMDID_FDB_GET_UNICAST DPSW_CMD_ID(0x081)
9847 +#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
9848 +#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
9849 +#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
9850 +#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
9851 +#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
9852 +#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
9853 +#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
9854 +#define DPSW_CMDID_FDB_GET_ATTR DPSW_CMD_ID(0x089)
9855 +
9856 +#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
9857 +#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
9858 +#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
9859 +#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
9860 +#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
9861 +#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
9862 +#define DPSW_CMDID_ACL_GET_ATTR DPSW_CMD_ID(0x096)
9863 +
9864 +#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
9865 +#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
9866 +#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
9867 +#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
9868 +
9869 +/* Macros for accessing command fields smaller than 1 byte */
9870 +#define DPSW_MASK(field) \
9871 + GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
9872 + DPSW_##field##_SHIFT)
9873 +#define dpsw_set_field(var, field, val) \
9874 + ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
9875 +#define dpsw_get_field(var, field) \
9876 + (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
9877 +#define dpsw_get_bit(var, bit) \
9878 + (((var) >> (bit)) & GENMASK(0, 0))
9879 +
9880 +static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val)
9881 +{
9882 + var |= (u64)val << bit & GENMASK(bit, bit);
9883 + return var;
9884 +}
9885 +
9886 +struct dpsw_cmd_open {
9887 + __le32 dpsw_id;
9888 +};
9889 +
9890 +#define DPSW_COMPONENT_TYPE_SHIFT 0
9891 +#define DPSW_COMPONENT_TYPE_SIZE 4
9892 +
9893 +struct dpsw_cmd_create {
9894 + /* cmd word 0 */
9895 + __le16 num_ifs;
9896 + u8 max_fdbs;
9897 + u8 max_meters_per_if;
9898 + /* from LSB: only the first 4 bits */
9899 + u8 component_type;
9900 + u8 pad[3];
9901 + /* cmd word 1 */
9902 + __le16 max_vlans;
9903 + __le16 max_fdb_entries;
9904 + __le16 fdb_aging_time;
9905 + __le16 max_fdb_mc_groups;
9906 + /* cmd word 2 */
9907 + __le64 options;
9908 +};
9909 +
9910 +struct dpsw_cmd_destroy {
9911 + __le32 dpsw_id;
9912 +};
9913 +
9914 +#define DPSW_ENABLE_SHIFT 0
9915 +#define DPSW_ENABLE_SIZE 1
9916 +
9917 +struct dpsw_rsp_is_enabled {
9918 + /* from LSB: enable:1 */
9919 + u8 enabled;
9920 +};
9921 +
9922 +struct dpsw_cmd_set_irq {
9923 + /* cmd word 0 */
9924 + u8 irq_index;
9925 + u8 pad[3];
9926 + __le32 irq_val;
9927 + /* cmd word 1 */
9928 + __le64 irq_addr;
9929 + /* cmd word 2 */
9930 + __le32 irq_num;
9931 +};
9932 +
9933 +struct dpsw_cmd_get_irq {
9934 + __le32 pad;
9935 + u8 irq_index;
9936 +};
9937 +
9938 +struct dpsw_rsp_get_irq {
9939 + /* cmd word 0 */
9940 + __le32 irq_val;
9941 + __le32 pad;
9942 + /* cmd word 1 */
9943 + __le64 irq_addr;
9944 + /* cmd word 2 */
9945 + __le32 irq_num;
9946 + __le32 irq_type;
9947 +};
9948 +
9949 +struct dpsw_cmd_set_irq_enable {
9950 + u8 enable_state;
9951 + u8 pad[3];
9952 + u8 irq_index;
9953 +};
9954 +
9955 +struct dpsw_cmd_get_irq_enable {
9956 + __le32 pad;
9957 + u8 irq_index;
9958 +};
9959 +
9960 +struct dpsw_rsp_get_irq_enable {
9961 + u8 enable_state;
9962 +};
9963 +
9964 +struct dpsw_cmd_set_irq_mask {
9965 + __le32 mask;
9966 + u8 irq_index;
9967 +};
9968 +
9969 +struct dpsw_cmd_get_irq_mask {
9970 + __le32 pad;
9971 + u8 irq_index;
9972 +};
9973 +
9974 +struct dpsw_rsp_get_irq_mask {
9975 + __le32 mask;
9976 +};
9977 +
9978 +struct dpsw_cmd_get_irq_status {
9979 + __le32 status;
9980 + u8 irq_index;
9981 +};
9982 +
9983 +struct dpsw_rsp_get_irq_status {
9984 + __le32 status;
9985 +};
9986 +
9987 +struct dpsw_cmd_clear_irq_status {
9988 + __le32 status;
9989 + u8 irq_index;
9990 +};
9991 +
9992 +#define DPSW_COMPONENT_TYPE_SHIFT 0
9993 +#define DPSW_COMPONENT_TYPE_SIZE 4
9994 +
9995 +struct dpsw_rsp_get_attr {
9996 + /* cmd word 0 */
9997 + __le16 num_ifs;
9998 + u8 max_fdbs;
9999 + u8 num_fdbs;
10000 + __le16 max_vlans;
10001 + __le16 num_vlans;
10002 + /* cmd word 1 */
10003 + __le16 max_fdb_entries;
10004 + __le16 fdb_aging_time;
10005 + __le32 dpsw_id;
10006 + /* cmd word 2 */
10007 + __le16 mem_size;
10008 + __le16 max_fdb_mc_groups;
10009 + u8 max_meters_per_if;
10010 +	/* from LSB: only the first 4 bits */
10011 + u8 component_type;
10012 + __le16 pad;
10013 + /* cmd word 3 */
10014 + __le64 options;
10015 +};
10016 +
10017 +struct dpsw_cmd_set_reflection_if {
10018 + __le16 if_id;
10019 +};
10020 +
10021 +struct dpsw_cmd_if_set_flooding {
10022 + __le16 if_id;
10023 + /* from LSB: enable:1 */
10024 + u8 enable;
10025 +};
10026 +
10027 +struct dpsw_cmd_if_set_broadcast {
10028 + __le16 if_id;
10029 + /* from LSB: enable:1 */
10030 + u8 enable;
10031 +};
10032 +
10033 +struct dpsw_cmd_if_set_multicast {
10034 + __le16 if_id;
10035 + /* from LSB: enable:1 */
10036 + u8 enable;
10037 +};
10038 +
10039 +#define DPSW_VLAN_ID_SHIFT 0
10040 +#define DPSW_VLAN_ID_SIZE 12
10041 +#define DPSW_DEI_SHIFT 12
10042 +#define DPSW_DEI_SIZE 1
10043 +#define DPSW_PCP_SHIFT 13
10044 +#define DPSW_PCP_SIZE 3
10045 +
10046 +struct dpsw_cmd_if_set_tci {
10047 + __le16 if_id;
10048 + /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
10049 + __le16 conf;
10050 +};
10051 +
10052 +struct dpsw_cmd_if_get_tci {
10053 + __le16 if_id;
10054 +};
10055 +
10056 +struct dpsw_rsp_if_get_tci {
10057 + __le16 pad;
10058 + __le16 vlan_id;
10059 + u8 dei;
10060 + u8 pcp;
10061 +};
10062 +
10063 +#define DPSW_STATE_SHIFT 0
10064 +#define DPSW_STATE_SIZE 4
10065 +
10066 +struct dpsw_cmd_if_set_stp {
10067 + __le16 if_id;
10068 + __le16 vlan_id;
10069 + /* only the first LSB 4 bits */
10070 + u8 state;
10071 +};
10072 +
10073 +#define DPSW_FRAME_TYPE_SHIFT 0
10074 +#define DPSW_FRAME_TYPE_SIZE 4
10075 +#define DPSW_UNACCEPTED_ACT_SHIFT 4
10076 +#define DPSW_UNACCEPTED_ACT_SIZE 4
10077 +
10078 +struct dpsw_cmd_if_set_accepted_frames {
10079 + __le16 if_id;
10080 + /* from LSB: type:4 unaccepted_act:4 */
10081 + u8 unaccepted;
10082 +};
10083 +
10084 +#define DPSW_ACCEPT_ALL_SHIFT 0
10085 +#define DPSW_ACCEPT_ALL_SIZE 1
10086 +
10087 +struct dpsw_cmd_if_set_accept_all_vlan {
10088 + __le16 if_id;
10089 + /* only the least significant bit */
10090 + u8 accept_all;
10091 +};
10092 +
10093 +#define DPSW_COUNTER_TYPE_SHIFT 0
10094 +#define DPSW_COUNTER_TYPE_SIZE 5
10095 +
10096 +struct dpsw_cmd_if_get_counter {
10097 + __le16 if_id;
10098 + /* from LSB: type:5 */
10099 + u8 type;
10100 +};
10101 +
10102 +struct dpsw_rsp_if_get_counter {
10103 + __le64 pad;
10104 + __le64 counter;
10105 +};
10106 +
10107 +struct dpsw_cmd_if_set_counter {
10108 + /* cmd word 0 */
10109 + __le16 if_id;
10110 + /* from LSB: type:5 */
10111 + u8 type;
10112 + /* cmd word 1 */
10113 + __le64 counter;
10114 +};
10115 +
10116 +#define DPSW_PRIORITY_SELECTOR_SHIFT 0
10117 +#define DPSW_PRIORITY_SELECTOR_SIZE 3
10118 +#define DPSW_SCHED_MODE_SHIFT 0
10119 +#define DPSW_SCHED_MODE_SIZE 4
10120 +
10121 +struct dpsw_cmd_if_set_tx_selection {
10122 + __le16 if_id;
10123 + /* from LSB: priority_selector:3 */
10124 + u8 priority_selector;
10125 + u8 pad[5];
10126 + u8 tc_id[8];
10127 +
10128 + struct dpsw_tc_sched {
10129 + __le16 delta_bandwidth;
10130 + u8 mode;
10131 + u8 pad;
10132 + } tc_sched[8];
10133 +};
10134 +
10135 +#define DPSW_FILTER_SHIFT 0
10136 +#define DPSW_FILTER_SIZE 2
10137 +
10138 +struct dpsw_cmd_if_reflection {
10139 + __le16 if_id;
10140 + __le16 vlan_id;
10141 + /* only 2 bits from the LSB */
10142 + u8 filter;
10143 +};
10144 +
10145 +#define DPSW_MODE_SHIFT 0
10146 +#define DPSW_MODE_SIZE 4
10147 +#define DPSW_UNITS_SHIFT 4
10148 +#define DPSW_UNITS_SIZE 4
10149 +
10150 +struct dpsw_cmd_if_set_flooding_metering {
10151 + /* cmd word 0 */
10152 + __le16 if_id;
10153 + u8 pad;
10154 + /* from LSB: mode:4 units:4 */
10155 + u8 mode_units;
10156 + __le32 cir;
10157 + /* cmd word 1 */
10158 + __le32 eir;
10159 + __le32 cbs;
10160 + /* cmd word 2 */
10161 + __le32 ebs;
10162 +};
10163 +
10164 +struct dpsw_cmd_if_set_metering {
10165 + /* cmd word 0 */
10166 + __le16 if_id;
10167 + u8 tc_id;
10168 + /* from LSB: mode:4 units:4 */
10169 + u8 mode_units;
10170 + __le32 cir;
10171 + /* cmd word 1 */
10172 + __le32 eir;
10173 + __le32 cbs;
10174 + /* cmd word 2 */
10175 + __le32 ebs;
10176 +};
10177 +
10178 +#define DPSW_EARLY_DROP_MODE_SHIFT 0
10179 +#define DPSW_EARLY_DROP_MODE_SIZE 2
10180 +#define DPSW_EARLY_DROP_UNIT_SHIFT 2
10181 +#define DPSW_EARLY_DROP_UNIT_SIZE 2
10182 +
10183 +struct dpsw_prep_early_drop {
10184 + /* from LSB: mode:2 units:2 */
10185 + u8 conf;
10186 + u8 pad0[3];
10187 + __le32 tail_drop_threshold;
10188 + u8 green_drop_probability;
10189 + u8 pad1[7];
10190 + __le64 green_max_threshold;
10191 + __le64 green_min_threshold;
10192 + __le64 pad2;
10193 + u8 yellow_drop_probability;
10194 + u8 pad3[7];
10195 + __le64 yellow_max_threshold;
10196 + __le64 yellow_min_threshold;
10197 +};
10198 +
10199 +struct dpsw_cmd_if_set_early_drop {
10200 + /* cmd word 0 */
10201 + u8 pad0;
10202 + u8 tc_id;
10203 + __le16 if_id;
10204 + __le32 pad1;
10205 + /* cmd word 1 */
10206 + __le64 early_drop_iova;
10207 +};
10208 +
10209 +struct dpsw_cmd_custom_tpid {
10210 + __le16 pad;
10211 + __le16 tpid;
10212 +};
10213 +
10214 +struct dpsw_cmd_if {
10215 + __le16 if_id;
10216 +};
10217 +
10218 +#define DPSW_ADMIT_UNTAGGED_SHIFT 0
10219 +#define DPSW_ADMIT_UNTAGGED_SIZE 4
10220 +#define DPSW_ENABLED_SHIFT 5
10221 +#define DPSW_ENABLED_SIZE 1
10222 +#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
10223 +#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
10224 +
10225 +struct dpsw_rsp_if_get_attr {
10226 + /* cmd word 0 */
10227 + /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
10228 + u8 conf;
10229 + u8 pad1;
10230 + u8 num_tcs;
10231 + u8 pad2;
10232 + __le16 qdid;
10233 + /* cmd word 1 */
10234 + __le32 options;
10235 + __le32 pad3;
10236 + /* cmd word 2 */
10237 + __le32 rate;
10238 +};
10239 +
10240 +struct dpsw_cmd_if_set_max_frame_length {
10241 + __le16 if_id;
10242 + __le16 frame_length;
10243 +};
10244 +
10245 +struct dpsw_cmd_if_get_max_frame_length {
10246 + __le16 if_id;
10247 +};
10248 +
10249 +struct dpsw_rsp_if_get_max_frame_length {
10250 + __le16 pad;
10251 + __le16 frame_length;
10252 +};
10253 +
10254 +struct dpsw_cmd_if_set_link_cfg {
10255 + /* cmd word 0 */
10256 + __le16 if_id;
10257 + u8 pad[6];
10258 + /* cmd word 1 */
10259 + __le32 rate;
10260 + __le32 pad1;
10261 + /* cmd word 2 */
10262 + __le64 options;
10263 +};
10264 +
10265 +struct dpsw_cmd_if_get_link_state {
10266 + __le16 if_id;
10267 +};
10268 +
10269 +#define DPSW_UP_SHIFT 0
10270 +#define DPSW_UP_SIZE 1
10271 +
10272 +struct dpsw_rsp_if_get_link_state {
10273 + /* cmd word 0 */
10274 + __le32 pad0;
10275 + u8 up;
10276 + u8 pad1[3];
10277 + /* cmd word 1 */
10278 + __le32 rate;
10279 + __le32 pad2;
10280 + /* cmd word 2 */
10281 + __le64 options;
10282 +};
10283 +
10284 +struct dpsw_vlan_add {
10285 + __le16 fdb_id;
10286 + __le16 vlan_id;
10287 +};
10288 +
10289 +struct dpsw_cmd_vlan_manage_if {
10290 + /* cmd word 0 */
10291 + __le16 pad0;
10292 + __le16 vlan_id;
10293 + __le32 pad1;
10294 + /* cmd word 1 */
10295 + __le64 if_id[4];
10296 +};
10297 +
10298 +struct dpsw_cmd_vlan_remove {
10299 + __le16 pad;
10300 + __le16 vlan_id;
10301 +};
10302 +
10303 +struct dpsw_cmd_vlan_get_attr {
10304 + __le16 vlan_id;
10305 +};
10306 +
10307 +struct dpsw_rsp_vlan_get_attr {
10308 + /* cmd word 0 */
10309 + __le64 pad;
10310 + /* cmd word 1 */
10311 + __le16 fdb_id;
10312 + __le16 num_ifs;
10313 + __le16 num_untagged_ifs;
10314 + __le16 num_flooding_ifs;
10315 +};
10316 +
10317 +struct dpsw_cmd_vlan_get_if {
10318 + __le16 vlan_id;
10319 +};
10320 +
10321 +struct dpsw_rsp_vlan_get_if {
10322 + /* cmd word 0 */
10323 + __le16 pad0;
10324 + __le16 num_ifs;
10325 + u8 pad1[4];
10326 + /* cmd word 1 */
10327 + __le64 if_id[4];
10328 +};
10329 +
10330 +struct dpsw_cmd_vlan_get_if_untagged {
10331 + __le16 vlan_id;
10332 +};
10333 +
10334 +struct dpsw_rsp_vlan_get_if_untagged {
10335 + /* cmd word 0 */
10336 + __le16 pad0;
10337 + __le16 num_ifs;
10338 + u8 pad1[4];
10339 + /* cmd word 1 */
10340 + __le64 if_id[4];
10341 +};
10342 +
10343 +struct dpsw_cmd_vlan_get_if_flooding {
10344 + __le16 vlan_id;
10345 +};
10346 +
10347 +struct dpsw_rsp_vlan_get_if_flooding {
10348 + /* cmd word 0 */
10349 + __le16 pad0;
10350 + __le16 num_ifs;
10351 + u8 pad1[4];
10352 + /* cmd word 1 */
10353 + __le64 if_id[4];
10354 +};
10355 +
10356 +struct dpsw_cmd_fdb_add {
10357 + __le32 pad;
10358 + __le16 fdb_aging_time;
10359 + __le16 num_fdb_entries;
10360 +};
10361 +
10362 +struct dpsw_rsp_fdb_add {
10363 + __le16 fdb_id;
10364 +};
10365 +
10366 +struct dpsw_cmd_fdb_remove {
10367 + __le16 fdb_id;
10368 +};
10369 +
10370 +#define DPSW_ENTRY_TYPE_SHIFT 0
10371 +#define DPSW_ENTRY_TYPE_SIZE 4
10372 +
10373 +struct dpsw_cmd_fdb_add_unicast {
10374 + /* cmd word 0 */
10375 + __le16 fdb_id;
10376 + u8 mac_addr[6];
10377 + /* cmd word 1 */
10378 + u8 if_egress;
10379 + u8 pad;
10380 + /* only the first 4 bits from LSB */
10381 + u8 type;
10382 +};
10383 +
10384 +struct dpsw_cmd_fdb_get_unicast {
10385 + __le16 fdb_id;
10386 + u8 mac_addr[6];
10387 +};
10388 +
10389 +struct dpsw_rsp_fdb_get_unicast {
10390 + __le64 pad;
10391 + __le16 if_egress;
10392 + /* only first 4 bits from LSB */
10393 + u8 type;
10394 +};
10395 +
10396 +struct dpsw_cmd_fdb_remove_unicast {
10397 + /* cmd word 0 */
10398 + __le16 fdb_id;
10399 + u8 mac_addr[6];
10400 + /* cmd word 1 */
10401 + __le16 if_egress;
10402 + /* only the first 4 bits from LSB */
10403 + u8 type;
10404 +};
10405 +
10406 +struct dpsw_cmd_fdb_add_multicast {
10407 + /* cmd word 0 */
10408 + __le16 fdb_id;
10409 + __le16 num_ifs;
10410 + /* only the first 4 bits from LSB */
10411 + u8 type;
10412 + u8 pad[3];
10413 + /* cmd word 1 */
10414 + u8 mac_addr[6];
10415 + __le16 pad2;
10416 + /* cmd word 2 */
10417 + __le64 if_id[4];
10418 +};
10419 +
10420 +struct dpsw_cmd_fdb_get_multicast {
10421 + __le16 fdb_id;
10422 + u8 mac_addr[6];
10423 +};
10424 +
10425 +struct dpsw_rsp_fdb_get_multicast {
10426 + /* cmd word 0 */
10427 + __le64 pad0;
10428 + /* cmd word 1 */
10429 + __le16 num_ifs;
10430 + /* only the first 4 bits from LSB */
10431 + u8 type;
10432 + u8 pad1[5];
10433 + /* cmd word 2 */
10434 + __le64 if_id[4];
10435 +};
10436 +
10437 +struct dpsw_cmd_fdb_remove_multicast {
10438 + /* cmd word 0 */
10439 + __le16 fdb_id;
10440 + __le16 num_ifs;
10441 + /* only the first 4 bits from LSB */
10442 + u8 type;
10443 + u8 pad[3];
10444 + /* cmd word 1 */
10445 + u8 mac_addr[6];
10446 + __le16 pad2;
10447 + /* cmd word 2 */
10448 + __le64 if_id[4];
10449 +};
10450 +
10451 +#define DPSW_LEARNING_MODE_SHIFT 0
10452 +#define DPSW_LEARNING_MODE_SIZE 4
10453 +
10454 +struct dpsw_cmd_fdb_set_learning_mode {
10455 + __le16 fdb_id;
10456 + /* only the first 4 bits from LSB */
10457 + u8 mode;
10458 +};
10459 +
10460 +struct dpsw_cmd_fdb_get_attr {
10461 + __le16 fdb_id;
10462 +};
10463 +
10464 +struct dpsw_rsp_fdb_get_attr {
10465 + /* cmd word 0 */
10466 + __le16 pad;
10467 + __le16 max_fdb_entries;
10468 + __le16 fdb_aging_time;
10469 + __le16 num_fdb_mc_groups;
10470 + /* cmd word 1 */
10471 + __le16 max_fdb_mc_groups;
10472 + /* only the first 4 bits from LSB */
10473 + u8 learning_mode;
10474 +};
10475 +
10476 +struct dpsw_cmd_acl_add {
10477 + __le16 pad;
10478 + __le16 max_entries;
10479 +};
10480 +
10481 +struct dpsw_rsp_acl_add {
10482 + __le16 acl_id;
10483 +};
10484 +
10485 +struct dpsw_cmd_acl_remove {
10486 + __le16 acl_id;
10487 +};
10488 +
10489 +struct dpsw_prep_acl_entry {
10490 + u8 match_l2_dest_mac[6];
10491 + __le16 match_l2_tpid;
10492 +
10493 + u8 match_l2_source_mac[6];
10494 + __le16 match_l2_vlan_id;
10495 +
10496 + __le32 match_l3_dest_ip;
10497 + __le32 match_l3_source_ip;
10498 +
10499 + __le16 match_l4_dest_port;
10500 + __le16 match_l4_source_port;
10501 + __le16 match_l2_ether_type;
10502 + u8 match_l2_pcp_dei;
10503 + u8 match_l3_dscp;
10504 +
10505 + u8 mask_l2_dest_mac[6];
10506 + __le16 mask_l2_tpid;
10507 +
10508 + u8 mask_l2_source_mac[6];
10509 + __le16 mask_l2_vlan_id;
10510 +
10511 + __le32 mask_l3_dest_ip;
10512 + __le32 mask_l3_source_ip;
10513 +
10514 + __le16 mask_l4_dest_port;
10515 + __le16 mask_l4_source_port;
10516 + __le16 mask_l2_ether_type;
10517 + u8 mask_l2_pcp_dei;
10518 + u8 mask_l3_dscp;
10519 +
10520 + u8 match_l3_protocol;
10521 + u8 mask_l3_protocol;
10522 +};
10523 +
10524 +#define DPSW_RESULT_ACTION_SHIFT 0
10525 +#define DPSW_RESULT_ACTION_SIZE 4
10526 +
10527 +struct dpsw_cmd_acl_entry {
10528 + __le16 acl_id;
10529 + __le16 result_if_id;
10530 + __le32 precedence;
10531 + /* from LSB only the first 4 bits */
10532 + u8 result_action;
10533 + u8 pad[7];
10534 + __le64 pad2[4];
10535 + __le64 key_iova;
10536 +};
10537 +
10538 +struct dpsw_cmd_acl_if {
10539 + /* cmd word 0 */
10540 + __le16 acl_id;
10541 + __le16 num_ifs;
10542 + __le32 pad;
10543 + /* cmd word 1 */
10544 + __le64 if_id[4];
10545 +};
10546 +
10547 +struct dpsw_cmd_acl_get_attr {
10548 + __le16 acl_id;
10549 +};
10550 +
10551 +struct dpsw_rsp_acl_get_attr {
10552 + /* cmd word 0 */
10553 + __le64 pad;
10554 + /* cmd word 1 */
10555 + __le16 max_entries;
10556 + __le16 num_entries;
10557 + __le16 num_ifs;
10558 +};
10559 +
10560 +struct dpsw_rsp_ctrl_if_get_attr {
10561 + /* cmd word 0 */
10562 + __le64 pad;
10563 + /* cmd word 1 */
10564 + __le32 rx_fqid;
10565 + __le32 rx_err_fqid;
10566 + /* cmd word 2 */
10567 + __le32 tx_err_conf_fqid;
10568 +};
10569 +
10570 +struct dpsw_cmd_ctrl_if_set_pools {
10571 + u8 num_dpbp;
10572 + /* from LSB: POOL0_BACKUP_POOL:1 ... POOL7_BACKUP_POOL */
10573 + u8 backup_pool;
10574 + __le16 pad;
10575 + __le32 dpbp_id[8];
10576 + __le16 buffer_size[8];
10577 +};
10578 +
10579 +struct dpsw_rsp_get_api_version {
10580 + __le16 version_major;
10581 + __le16 version_minor;
10582 +};
10583 +
10584 +#endif /* __FSL_DPSW_CMD_H */
10585 --- /dev/null
10586 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
10587 @@ -0,0 +1,2762 @@
10588 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
10589 + *
10590 + * Redistribution and use in source and binary forms, with or without
10591 + * modification, are permitted provided that the following conditions are met:
10592 + * * Redistributions of source code must retain the above copyright
10593 + * notice, this list of conditions and the following disclaimer.
10594 + * * Redistributions in binary form must reproduce the above copyright
10595 + * notice, this list of conditions and the following disclaimer in the
10596 + * documentation and/or other materials provided with the distribution.
10597 + * * Neither the name of the above-listed copyright holders nor the
10598 + * names of any contributors may be used to endorse or promote products
10599 + * derived from this software without specific prior written permission.
10600 + *
10601 + *
10602 + * ALTERNATIVELY, this software may be distributed under the terms of the
10603 + * GNU General Public License ("GPL") as published by the Free Software
10604 + * Foundation, either version 2 of that License or (at your option) any
10605 + * later version.
10606 + *
10607 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10608 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10609 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10610 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10611 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10612 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10613 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10614 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10615 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10616 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10617 + * POSSIBILITY OF SUCH DAMAGE.
10618 + */
10619 +#include "../../fsl-mc/include/mc-sys.h"
10620 +#include "../../fsl-mc/include/mc-cmd.h"
10621 +#include "dpsw.h"
10622 +#include "dpsw-cmd.h"
10623 +
10624 +static void build_if_id_bitmap(__le64 *bmap, const u16 *id,
10625 +			       const u16 num_ifs)
10626 +{
10627 + int i;
10628 +
10629 + for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
10630 + bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64],
10631 + (id[i] % 64),
10632 + 1);
10633 +}
10634 +
10635 +static void read_if_id_bitmap(u16 *if_id, u16 *num_ifs,
10636 +			      __le64 *bmap)
10637 +{
10638 + int bitmap[DPSW_MAX_IF] = { 0 };
10639 + int i, j = 0;
10640 + int count = 0;
10641 +
10642 + for (i = 0; i < DPSW_MAX_IF; i++) {
10643 + bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]),
10644 + i % 64);
10645 + count += bitmap[i];
10646 + }
10647 +
10648 + *num_ifs = (u16)count;
10649 +
10650 + for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
10651 + if (bitmap[i]) {
10652 + if_id[j] = (u16)i;
10653 + j++;
10654 + }
10655 + }
10656 +}
10657 +
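/*
 * Editor's note (illustrative sketch, not part of the patch): minimal usage
 * of the two helpers above.  Interface i is represented by bit (i % 64) of
 * 64-bit word (i / 64) in the 256-bit map carried inside the MC command.
 * Assumes ARRAY_SIZE, __maybe_unused and DPSW_MAX_IF are available through
 * the driver's usual includes.
 */
static void __maybe_unused dpsw_if_id_bitmap_demo(void)
{
	__le64 bmap[4] = { 0 };
	u16 in_ids[] = { 0, 3, 9 };
	u16 out_ids[DPSW_MAX_IF];
	u16 num_ifs;

	/* sets bits 0, 3 and 9 of the first 64-bit word */
	build_if_id_bitmap(bmap, in_ids, ARRAY_SIZE(in_ids));

	/* recovers num_ifs == 3 and out_ids == { 0, 3, 9 } */
	read_if_id_bitmap(out_ids, &num_ifs, bmap);
}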
10658 +/**
10659 + * dpsw_open() - Open a control session for the specified object
10660 + * @mc_io: Pointer to MC portal's I/O object
10661 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10662 + * @dpsw_id: DPSW unique ID
10663 + * @token: Returned token; use in subsequent API calls
10664 + *
10665 + * This function can be used to open a control session for an
10666 + * already created object; an object may have been declared in
10667 + * the DPL or by calling the dpsw_create() function.
10668 + * This function returns a unique authentication token,
10669 + * associated with the specific object ID and the specific MC
10670 + * portal; this token must be used in all subsequent commands for
10671 + * this specific object
10672 + *
10673 + * Return: '0' on Success; Error code otherwise.
10674 + */
10675 +int dpsw_open(struct fsl_mc_io *mc_io,
10676 + u32 cmd_flags,
10677 + int dpsw_id,
10678 + u16 *token)
10679 +{
10680 + struct mc_command cmd = { 0 };
10681 + struct dpsw_cmd_open *cmd_params;
10682 + int err;
10683 +
10684 + /* prepare command */
10685 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
10686 + cmd_flags,
10687 + 0);
10688 + cmd_params = (struct dpsw_cmd_open *)cmd.params;
10689 + cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
10690 +
10691 + /* send command to mc*/
10692 + err = mc_send_command(mc_io, &cmd);
10693 + if (err)
10694 + return err;
10695 +
10696 + /* retrieve response parameters */
10697 + *token = mc_cmd_hdr_read_token(&cmd);
10698 +
10699 + return 0;
10700 +}
10701 +
10702 +/**
10703 + * dpsw_close() - Close the control session of the object
10704 + * @mc_io: Pointer to MC portal's I/O object
10705 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10706 + * @token: Token of DPSW object
10707 + *
10708 + * After this function is called, no further operations are
10709 + * allowed on the object without opening a new control session.
10710 + *
10711 + * Return: '0' on Success; Error code otherwise.
10712 + */
10713 +int dpsw_close(struct fsl_mc_io *mc_io,
10714 + u32 cmd_flags,
10715 + u16 token)
10716 +{
10717 + struct mc_command cmd = { 0 };
10718 +
10719 + /* prepare command */
10720 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
10721 + cmd_flags,
10722 + token);
10723 +
10724 + /* send command to mc*/
10725 + return mc_send_command(mc_io, &cmd);
10726 +}
10727 +
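/*
 * Editor's note (illustrative sketch, not part of the patch): the open/close
 * pair above brackets every DPSW control session.  A caller such as the
 * ethsw driver would typically do something like the following; error
 * handling is trimmed and cmd_flags is left at 0 for brevity.
 */
static int __maybe_unused dpsw_session_demo(struct fsl_mc_io *mc_io,
					    int dpsw_id)
{
	u16 token;
	int err;

	err = dpsw_open(mc_io, 0, dpsw_id, &token);
	if (err)
		return err;

	/* ... issue dpsw_*() commands using 'token' here ... */

	return dpsw_close(mc_io, 0, token);
}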
10728 +/**
10729 + * dpsw_enable() - Enable DPSW functionality
10730 + * @mc_io: Pointer to MC portal's I/O object
10731 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10732 + * @token: Token of DPSW object
10733 + *
10734 + * Return: Completion status. '0' on Success; Error code otherwise.
10735 + */
10736 +int dpsw_enable(struct fsl_mc_io *mc_io,
10737 + u32 cmd_flags,
10738 + u16 token)
10739 +{
10740 + struct mc_command cmd = { 0 };
10741 +
10742 + /* prepare command */
10743 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
10744 + cmd_flags,
10745 + token);
10746 +
10747 + /* send command to mc*/
10748 + return mc_send_command(mc_io, &cmd);
10749 +}
10750 +
10751 +/**
10752 + * dpsw_disable() - Disable DPSW functionality
10753 + * @mc_io: Pointer to MC portal's I/O object
10754 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10755 + * @token: Token of DPSW object
10756 + *
10757 + * Return: Completion status. '0' on Success; Error code otherwise.
10758 + */
10759 +int dpsw_disable(struct fsl_mc_io *mc_io,
10760 + u32 cmd_flags,
10761 + u16 token)
10762 +{
10763 + struct mc_command cmd = { 0 };
10764 +
10765 + /* prepare command */
10766 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
10767 + cmd_flags,
10768 + token);
10769 +
10770 + /* send command to mc*/
10771 + return mc_send_command(mc_io, &cmd);
10772 +}
10773 +
10774 +/**
10775 + * dpsw_is_enabled() - Check if the DPSW is enabled
10776 + *
10777 + * @mc_io: Pointer to MC portal's I/O object
10778 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10779 + * @token: Token of DPSW object
10780 + * @en: Returns '1' if object is enabled; '0' otherwise
10781 + *
10782 + * Return: '0' on Success; Error code otherwise
10783 + */
10784 +int dpsw_is_enabled(struct fsl_mc_io *mc_io,
10785 + u32 cmd_flags,
10786 + u16 token,
10787 + int *en)
10788 +{
10789 + struct mc_command cmd = { 0 };
10790 + struct dpsw_rsp_is_enabled *cmd_rsp;
10791 + int err;
10792 +
10793 + /* prepare command */
10794 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
10795 + token);
10796 +
10797 + /* send command to mc*/
10798 + err = mc_send_command(mc_io, &cmd);
10799 + if (err)
10800 + return err;
10801 +
10802 + /* retrieve response parameters */
10803 + cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params;
10804 + *en = dpsw_get_field(cmd_rsp->enabled, ENABLE);
10805 +
10806 + return 0;
10807 +}
10808 +
10809 +/**
10810 + * dpsw_reset() - Reset the DPSW, returns the object to initial state.
10811 + * @mc_io: Pointer to MC portal's I/O object
10812 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10813 + * @token: Token of DPSW object
10814 + *
10815 + * Return: '0' on Success; Error code otherwise.
10816 + */
10817 +int dpsw_reset(struct fsl_mc_io *mc_io,
10818 + u32 cmd_flags,
10819 + u16 token)
10820 +{
10821 + struct mc_command cmd = { 0 };
10822 +
10823 + /* prepare command */
10824 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
10825 + cmd_flags,
10826 + token);
10827 +
10828 + /* send command to mc*/
10829 + return mc_send_command(mc_io, &cmd);
10830 +}
10831 +
10832 +/**
10833 + * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
10834 + * @mc_io: Pointer to MC portal's I/O object
10835 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10836 + * @token: Token of DPSW object
10837 + * @irq_index: Identifies the interrupt index to configure
10838 + * @irq_cfg: IRQ configuration
10839 + *
10840 + * Return: '0' on Success; Error code otherwise.
10841 + */
10842 +int dpsw_set_irq(struct fsl_mc_io *mc_io,
10843 + u32 cmd_flags,
10844 + u16 token,
10845 + u8 irq_index,
10846 + struct dpsw_irq_cfg *irq_cfg)
10847 +{
10848 + struct mc_command cmd = { 0 };
10849 + struct dpsw_cmd_set_irq *cmd_params;
10850 +
10851 + /* prepare command */
10852 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
10853 + cmd_flags,
10854 + token);
10855 + cmd_params = (struct dpsw_cmd_set_irq *)cmd.params;
10856 + cmd_params->irq_index = irq_index;
10857 + cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
10858 + cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
10859 + cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
10860 +
10861 + /* send command to mc*/
10862 + return mc_send_command(mc_io, &cmd);
10863 +}
10864 +
10865 +/**
10866 + * dpsw_get_irq() - Get IRQ information from the DPSW
10867 + *
10868 + * @mc_io: Pointer to MC portal's I/O object
10869 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10870 + * @token: Token of DPSW object
10871 + * @irq_index: The interrupt index to configure
10872 + * @type: Interrupt type: 0 represents message interrupt
10873 + * type (both irq_addr and irq_val are valid)
10874 + * @irq_cfg: IRQ attributes
10875 + *
10876 + * Return: '0' on Success; Error code otherwise.
10877 + */
10878 +int dpsw_get_irq(struct fsl_mc_io *mc_io,
10879 + u32 cmd_flags,
10880 + u16 token,
10881 + u8 irq_index,
10882 + int *type,
10883 + struct dpsw_irq_cfg *irq_cfg)
10884 +{
10885 + struct mc_command cmd = { 0 };
10886 + struct dpsw_cmd_get_irq *cmd_params;
10887 + struct dpsw_rsp_get_irq *rsp_params;
10888 + int err;
10889 +
10890 + /* prepare command */
10891 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
10892 + cmd_flags,
10893 + token);
10894 + cmd_params = (struct dpsw_cmd_get_irq *)cmd.params;
10895 + cmd_params->irq_index = irq_index;
10896 +
10897 + /* send command to mc*/
10898 + err = mc_send_command(mc_io, &cmd);
10899 + if (err)
10900 + return err;
10901 +
10902 + /* retrieve response parameters */
10903 + rsp_params = (struct dpsw_rsp_get_irq *)cmd.params;
10904 + irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
10905 + irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
10906 + irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
10907 + *type = le32_to_cpu(rsp_params->irq_type);
10908 +
10909 + return 0;
10910 +}
10911 +
10912 +/**
10913 + * dpsw_set_irq_enable() - Set overall interrupt state.
10914 + * @mc_io: Pointer to MC portal's I/O object
10915 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10916 + * @token:	Token of DPSW object
10917 + * @irq_index: The interrupt index to configure
10918 + * @en: Interrupt state - enable = 1, disable = 0
10919 + *
10920 + * Allows GPP software to control when interrupts are generated.
10921 + * Each interrupt can have up to 32 causes. The enable/disable controls the
10922 + * overall interrupt state: if the interrupt is disabled, no cause will
10923 + * trigger an interrupt.
10924 + *
10925 + * Return: '0' on Success; Error code otherwise.
10926 + */
10927 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
10928 + u32 cmd_flags,
10929 + u16 token,
10930 + u8 irq_index,
10931 + u8 en)
10932 +{
10933 + struct mc_command cmd = { 0 };
10934 + struct dpsw_cmd_set_irq_enable *cmd_params;
10935 +
10936 + /* prepare command */
10937 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
10938 + cmd_flags,
10939 + token);
10940 + cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
10941 + dpsw_set_field(cmd_params->enable_state, ENABLE, en);
10942 + cmd_params->irq_index = irq_index;
10943 +
10944 + /* send command to mc*/
10945 + return mc_send_command(mc_io, &cmd);
10946 +}
10947 +
10948 +/**
10949 + * dpsw_set_irq_mask() - Set interrupt mask.
10950 + * @mc_io: Pointer to MC portal's I/O object
10951 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10952 + * @token:	Token of DPSW object
10953 + * @irq_index: The interrupt index to configure
10954 + * @mask: Event mask to trigger interrupt;
10955 + * each bit:
10956 + * 0 = ignore event
10957 + * 1 = consider event for asserting IRQ
10958 + *
10959 + * Every interrupt can have up to 32 causes and the interrupt model supports
10960 + * masking/unmasking each cause independently
10961 + *
10962 + * Return: '0' on Success; Error code otherwise.
10963 + */
10964 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
10965 + u32 cmd_flags,
10966 + u16 token,
10967 + u8 irq_index,
10968 + u32 mask)
10969 +{
10970 + struct mc_command cmd = { 0 };
10971 + struct dpsw_cmd_set_irq_mask *cmd_params;
10972 +
10973 + /* prepare command */
10974 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
10975 + cmd_flags,
10976 + token);
10977 + cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
10978 + cmd_params->mask = cpu_to_le32(mask);
10979 + cmd_params->irq_index = irq_index;
10980 +
10981 + /* send command to mc*/
10982 + return mc_send_command(mc_io, &cmd);
10983 +}
10984 +
10985 +/**
10986 + * dpsw_get_irq_status() - Get the current status of any pending interrupts
10987 + * @mc_io: Pointer to MC portal's I/O object
10988 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10989 + * @token: Token of DPSW object
10990 + * @irq_index: The interrupt index to configure
10991 + * @status: Returned interrupts status - one bit per cause:
10992 + * 0 = no interrupt pending
10993 + * 1 = interrupt pending
10994 + *
10995 + * Return: '0' on Success; Error code otherwise.
10996 + */
10997 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
10998 + u32 cmd_flags,
10999 + u16 token,
11000 + u8 irq_index,
11001 + u32 *status)
11002 +{
11003 + struct mc_command cmd = { 0 };
11004 + struct dpsw_cmd_get_irq_status *cmd_params;
11005 + struct dpsw_rsp_get_irq_status *rsp_params;
11006 + int err;
11007 +
11008 + /* prepare command */
11009 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
11010 + cmd_flags,
11011 + token);
11012 + cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
11013 + cmd_params->status = cpu_to_le32(*status);
11014 + cmd_params->irq_index = irq_index;
11015 +
11016 + /* send command to mc*/
11017 + err = mc_send_command(mc_io, &cmd);
11018 + if (err)
11019 + return err;
11020 +
11021 + /* retrieve response parameters */
11022 + rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
11023 + *status = le32_to_cpu(rsp_params->status);
11024 +
11025 + return 0;
11026 +}
11027 +
11028 +/**
11029 + * dpsw_clear_irq_status() - Clear a pending interrupt's status
11030 + * @mc_io: Pointer to MC portal's I/O object
11031 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11032 + * @token:	Token of DPSW object
11033 + * @irq_index: The interrupt index to configure
11034 + * @status: bits to clear (W1C) - one bit per cause:
11035 + * 0 = don't change
11036 + * 1 = clear status bit
11037 + *
11038 + * Return: '0' on Success; Error code otherwise.
11039 + */
11040 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
11041 + u32 cmd_flags,
11042 + u16 token,
11043 + u8 irq_index,
11044 + u32 status)
11045 +{
11046 + struct mc_command cmd = { 0 };
11047 + struct dpsw_cmd_clear_irq_status *cmd_params;
11048 +
11049 + /* prepare command */
11050 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
11051 + cmd_flags,
11052 + token);
11053 + cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
11054 + cmd_params->status = cpu_to_le32(status);
11055 + cmd_params->irq_index = irq_index;
11056 +
11057 + /* send command to mc*/
11058 + return mc_send_command(mc_io, &cmd);
11059 +}
11060 +
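/*
 * Editor's note (illustrative sketch, not part of the patch): the status
 * word is write-1-to-clear, so a handler typically reads the pending causes
 * and writes the same mask back using the two calls above.  The irq_index
 * of 0 used here is only an example value.
 */
static void __maybe_unused dpsw_irq_status_demo(struct fsl_mc_io *mc_io,
						u16 token)
{
	u32 status = 0;

	if (dpsw_get_irq_status(mc_io, 0, token, 0, &status))
		return;

	if (status)
		dpsw_clear_irq_status(mc_io, 0, token, 0, status);
}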
11061 +/**
11062 + * dpsw_get_attributes() - Retrieve DPSW attributes
11063 + * @mc_io: Pointer to MC portal's I/O object
11064 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11065 + * @token: Token of DPSW object
11066 + * @attr: Returned DPSW attributes
11067 + *
11068 + * Return: Completion status. '0' on Success; Error code otherwise.
11069 + */
11070 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
11071 + u32 cmd_flags,
11072 + u16 token,
11073 + struct dpsw_attr *attr)
11074 +{
11075 + struct mc_command cmd = { 0 };
11076 + struct dpsw_rsp_get_attr *rsp_params;
11077 + int err;
11078 +
11079 + /* prepare command */
11080 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
11081 + cmd_flags,
11082 + token);
11083 +
11084 + /* send command to mc*/
11085 + err = mc_send_command(mc_io, &cmd);
11086 + if (err)
11087 + return err;
11088 +
11089 + /* retrieve response parameters */
11090 + rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
11091 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
11092 + attr->max_fdbs = rsp_params->max_fdbs;
11093 + attr->num_fdbs = rsp_params->num_fdbs;
11094 + attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
11095 + attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
11096 + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
11097 + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
11098 + attr->id = le32_to_cpu(rsp_params->dpsw_id);
11099 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
11100 + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
11101 + attr->max_meters_per_if = rsp_params->max_meters_per_if;
11102 + attr->options = le64_to_cpu(rsp_params->options);
11103 + attr->component_type = dpsw_get_field(rsp_params->component_type,
11104 + COMPONENT_TYPE);
11105 +
11106 + return 0;
11107 +}
11108 +
11109 +/**
11110 + * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
11111 + * @mc_io: Pointer to MC portal's I/O object
11112 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11113 + * @token: Token of DPSW object
11114 + * @if_id: Interface Id
11115 + *
11116 + * Only one reflection receive interface is allowed per switch
11117 + *
11118 + * Return: Completion status. '0' on Success; Error code otherwise.
11119 + */
11120 +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
11121 + u32 cmd_flags,
11122 + u16 token,
11123 + u16 if_id)
11124 +{
11125 + struct mc_command cmd = { 0 };
11126 + struct dpsw_cmd_set_reflection_if *cmd_params;
11127 +
11128 + /* prepare command */
11129 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
11130 + cmd_flags,
11131 + token);
11132 + cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
11133 + cmd_params->if_id = cpu_to_le16(if_id);
11134 +
11135 + /* send command to mc*/
11136 + return mc_send_command(mc_io, &cmd);
11137 +}
11138 +
11139 +/**
11140 + * dpsw_if_set_link_cfg() - Set the link configuration.
11141 + * @mc_io: Pointer to MC portal's I/O object
11142 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11143 + * @token: Token of DPSW object
11144 + * @if_id: Interface id
11145 + * @cfg: Link configuration
11146 + *
11147 + * Return: '0' on Success; Error code otherwise.
11148 + */
11149 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
11150 + u32 cmd_flags,
11151 + u16 token,
11152 + u16 if_id,
11153 + struct dpsw_link_cfg *cfg)
11154 +{
11155 + struct mc_command cmd = { 0 };
11156 + struct dpsw_cmd_if_set_link_cfg *cmd_params;
11157 +
11158 + /* prepare command */
11159 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
11160 + cmd_flags,
11161 + token);
11162 + cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
11163 + cmd_params->if_id = cpu_to_le16(if_id);
11164 + cmd_params->rate = cpu_to_le32(cfg->rate);
11165 + cmd_params->options = cpu_to_le64(cfg->options);
11166 +
11167 + /* send command to mc*/
11168 + return mc_send_command(mc_io, &cmd);
11169 +}
11170 +
11171 +/**
11172 + * dpsw_if_get_link_state() - Return the link state
11173 + * @mc_io: Pointer to MC portal's I/O object
11174 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11175 + * @token: Token of DPSW object
11176 + * @if_id: Interface id
11177 + * @state:	Link state: 1 - link up, 0 - link down or disconnected
11178 + *
11179 + * Return:	'0' on Success; Error code otherwise.
11180 + */
11181 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
11182 + u32 cmd_flags,
11183 + u16 token,
11184 + u16 if_id,
11185 + struct dpsw_link_state *state)
11186 +{
11187 + struct mc_command cmd = { 0 };
11188 + struct dpsw_cmd_if_get_link_state *cmd_params;
11189 + struct dpsw_rsp_if_get_link_state *rsp_params;
11190 + int err;
11191 +
11192 + /* prepare command */
11193 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
11194 + cmd_flags,
11195 + token);
11196 + cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
11197 + cmd_params->if_id = cpu_to_le16(if_id);
11198 +
11199 + /* send command to mc*/
11200 + err = mc_send_command(mc_io, &cmd);
11201 + if (err)
11202 + return err;
11203 +
11204 + /* retrieve response parameters */
11205 + rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
11206 + state->rate = le32_to_cpu(rsp_params->rate);
11207 + state->options = le64_to_cpu(rsp_params->options);
11208 + state->up = dpsw_get_field(rsp_params->up, UP);
11209 +
11210 + return 0;
11211 +}
11212 +
11213 +/**
11214 + * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface
11215 + * @mc_io: Pointer to MC portal's I/O object
11216 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11217 + * @token: Token of DPSW object
11218 + * @if_id: Interface Identifier
11219 + * @en: 1 - enable, 0 - disable
11220 + *
11221 + * Return: Completion status. '0' on Success; Error code otherwise.
11222 + */
11223 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
11224 + u32 cmd_flags,
11225 + u16 token,
11226 + u16 if_id,
11227 + int en)
11228 +{
11229 + struct mc_command cmd = { 0 };
11230 + struct dpsw_cmd_if_set_flooding *cmd_params;
11231 +
11232 + /* prepare command */
11233 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
11234 + cmd_flags,
11235 + token);
11236 + cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
11237 + cmd_params->if_id = cpu_to_le16(if_id);
11238 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11239 +
11240 + /* send command to mc*/
11241 + return mc_send_command(mc_io, &cmd);
11242 +}
11243 +
11244 +/**
11245 + * dpsw_if_set_broadcast() - Enable/disable broadcast for a particular interface
11246 + * @mc_io: Pointer to MC portal's I/O object
11247 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11248 + * @token: Token of DPSW object
11249 + * @if_id: Interface Identifier
11250 + * @en: 1 - enable, 0 - disable
11251 + *
11252 + * Return: Completion status. '0' on Success; Error code otherwise.
11253 + */
11254 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
11255 + u32 cmd_flags,
11256 + u16 token,
11257 + u16 if_id,
11258 + int en)
11259 +{
11260 + struct mc_command cmd = { 0 };
11261 + struct dpsw_cmd_if_set_broadcast *cmd_params;
11262 +
11263 + /* prepare command */
11264 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
11265 + cmd_flags,
11266 + token);
11267 + cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
11268 + cmd_params->if_id = cpu_to_le16(if_id);
11269 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11270 +
11271 + /* send command to mc*/
11272 + return mc_send_command(mc_io, &cmd);
11273 +}
11274 +
11275 +/**
11276 + * dpsw_if_set_multicast() - Enable/disable multicast for a particular interface
11277 + * @mc_io: Pointer to MC portal's I/O object
11278 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11279 + * @token: Token of DPSW object
11280 + * @if_id: Interface Identifier
11281 + * @en: 1 - enable, 0 - disable
11282 + *
11283 + * Return: Completion status. '0' on Success; Error code otherwise.
11284 + */
11285 +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
11286 + u32 cmd_flags,
11287 + u16 token,
11288 + u16 if_id,
11289 + int en)
11290 +{
11291 + struct mc_command cmd = { 0 };
11292 + struct dpsw_cmd_if_set_multicast *cmd_params;
11293 +
11294 + /* prepare command */
11295 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
11296 + cmd_flags,
11297 + token);
11298 + cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params;
11299 + cmd_params->if_id = cpu_to_le16(if_id);
11300 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11301 +
11302 + /* send command to mc*/
11303 + return mc_send_command(mc_io, &cmd);
11304 +}
11305 +
11306 +/**
11307 + * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
11308 + * @mc_io: Pointer to MC portal's I/O object
11309 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11310 + * @token: Token of DPSW object
11311 + * @if_id: Interface Identifier
11312 + * @cfg: Tag Control Information Configuration
11313 + *
11314 + * Return: Completion status. '0' on Success; Error code otherwise.
11315 + */
11316 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
11317 + u32 cmd_flags,
11318 + u16 token,
11319 + u16 if_id,
11320 + const struct dpsw_tci_cfg *cfg)
11321 +{
11322 + struct mc_command cmd = { 0 };
11323 + struct dpsw_cmd_if_set_tci *cmd_params;
11324 +
11325 + /* prepare command */
11326 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
11327 + cmd_flags,
11328 + token);
11329 + cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
11330 + cmd_params->if_id = cpu_to_le16(if_id);
11331 + dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id);
11332 + dpsw_set_field(cmd_params->conf, DEI, cfg->dei);
11333 + dpsw_set_field(cmd_params->conf, PCP, cfg->pcp);
11334 + cmd_params->conf = cpu_to_le16(cmd_params->conf);
11335 +
11336 + /* send command to mc*/
11337 + return mc_send_command(mc_io, &cmd);
11338 +}
11339 +
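/*
 * Editor's note (illustrative sketch, not part of the patch): the 16-bit
 * 'conf' word built above packs, from LSB, VLAN_ID:12 DEI:1 PCP:3.  For
 * example vlan_id 100, dei 0, pcp 5 encodes as
 * (5 << 13) | (0 << 12) | 100 = 0xa064.
 */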
11340 +/**
11341 + * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
11342 + * @mc_io: Pointer to MC portal's I/O object
11343 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11344 + * @token: Token of DPSW object
11345 + * @if_id: Interface Identifier
11346 + * @cfg: Tag Control Information Configuration
11347 + *
11348 + * Return: Completion status. '0' on Success; Error code otherwise.
11349 + */
11350 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
11351 + u32 cmd_flags,
11352 + u16 token,
11353 + u16 if_id,
11354 + struct dpsw_tci_cfg *cfg)
11355 +{
11356 + struct mc_command cmd = { 0 };
11357 + struct dpsw_cmd_if_get_tci *cmd_params;
11358 + struct dpsw_rsp_if_get_tci *rsp_params;
11359 + int err;
11360 +
11361 + /* prepare command */
11362 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
11363 + cmd_flags,
11364 + token);
11365 + cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
11366 + cmd_params->if_id = cpu_to_le16(if_id);
11367 +
11368 + /* send command to mc*/
11369 + err = mc_send_command(mc_io, &cmd);
11370 + if (err)
11371 + return err;
11372 +
11373 + /* retrieve response parameters */
11374 + rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
11375 + cfg->pcp = rsp_params->pcp;
11376 + cfg->dei = rsp_params->dei;
11377 + cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
11378 +
11379 + return 0;
11380 +}
11381 +
11382 +/**
11383 + * dpsw_if_set_stp() - Set Spanning Tree Protocol (STP) state.
11384 + * @mc_io: Pointer to MC portal's I/O object
11385 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11386 + * @token: Token of DPSW object
11387 + * @if_id: Interface Identifier
11388 + * @cfg: STP State configuration parameters
11389 + *
11390 + * The following STP states are supported -
11391 + * blocking, listening, learning, forwarding and disabled.
11392 + *
11393 + * Return: Completion status. '0' on Success; Error code otherwise.
11394 + */
11395 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
11396 + u32 cmd_flags,
11397 + u16 token,
11398 + u16 if_id,
11399 + const struct dpsw_stp_cfg *cfg)
11400 +{
11401 + struct mc_command cmd = { 0 };
11402 + struct dpsw_cmd_if_set_stp *cmd_params;
11403 +
11404 + /* prepare command */
11405 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
11406 + cmd_flags,
11407 + token);
11408 + cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
11409 + cmd_params->if_id = cpu_to_le16(if_id);
11410 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
11411 + dpsw_set_field(cmd_params->state, STATE, cfg->state);
11412 +
11413 + /* send command to mc*/
11414 + return mc_send_command(mc_io, &cmd);
11415 +}
11416 +
11417 +/**
11418 + * dpsw_if_set_accepted_frames() - Set the types of frames accepted on an interface
11419 + * @mc_io: Pointer to MC portal's I/O object
11420 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11421 + * @token: Token of DPSW object
11422 + * @if_id: Interface Identifier
11423 + * @cfg: Frame types configuration
11424 + *
11425 + * When admit_only_vlan_tagged - the device will discard untagged
11426 + * frames or Priority-Tagged frames received on this interface.
11427 + * When admit_only_untagged - untagged frames or Priority-Tagged
11428 + * frames received on this interface will be accepted and assigned
11429 + * to a VID based on the PVID and VID Set for this interface.
11430 + * When admit_all - the device will accept VLAN tagged, untagged
11431 + * and priority tagged frames.
11432 + * The default is admit_all.
11433 + *
11434 + * Return: Completion status. '0' on Success; Error code otherwise.
11435 + */
11436 +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
11437 + u32 cmd_flags,
11438 + u16 token,
11439 + u16 if_id,
11440 + const struct dpsw_accepted_frames_cfg *cfg)
11441 +{
11442 + struct mc_command cmd = { 0 };
11443 + struct dpsw_cmd_if_set_accepted_frames *cmd_params;
11444 +
11445 + /* prepare command */
11446 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
11447 + cmd_flags,
11448 + token);
11449 + cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params;
11450 + cmd_params->if_id = cpu_to_le16(if_id);
11451 + dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type);
11452 + dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT,
11453 + cfg->unaccept_act);
11454 +
11455 + /* send command to mc*/
11456 + return mc_send_command(mc_io, &cmd);
11457 +}
11458 +
11459 +/**
11460 + * dpsw_if_set_accept_all_vlan() - Enable/disable accepting frames from all VLANs
11461 + * @mc_io: Pointer to MC portal's I/O object
11462 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11463 + * @token: Token of DPSW object
11464 + * @if_id: Interface Identifier
11465 + * @accept_all: Accept or drop frames having different VLAN
11466 + *
11467 + * When accept_all is FALSE, the device will discard incoming frames
11468 + * for VLANs that do not include this interface in their member set.
11469 + * When accept_all is TRUE, the interface will accept all incoming frames.
11470 + *
11471 + * Return: Completion status. '0' on Success; Error code otherwise.
11472 + */
11473 +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
11474 + u32 cmd_flags,
11475 + u16 token,
11476 + u16 if_id,
11477 + int accept_all)
11478 +{
11479 + struct mc_command cmd = { 0 };
11480 + struct dpsw_cmd_if_set_accept_all_vlan *cmd_params;
11481 +
11482 + /* prepare command */
11483 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
11484 + cmd_flags,
11485 + token);
11486 + cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params;
11487 + cmd_params->if_id = cpu_to_le16(if_id);
11488 + dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all);
11489 +
11490 + /* send command to mc*/
11491 + return mc_send_command(mc_io, &cmd);
11492 +}
11493 +
11494 +/**
11495 + * dpsw_if_get_counter() - Get a specific counter of a particular interface
11496 + * @mc_io: Pointer to MC portal's I/O object
11497 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11498 + * @token: Token of DPSW object
11499 + * @if_id: Interface Identifier
11500 + * @type: Counter type
11501 + * @counter: return value
11502 + *
11503 + * Return: Completion status. '0' on Success; Error code otherwise.
11504 + */
11505 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
11506 + u32 cmd_flags,
11507 + u16 token,
11508 + u16 if_id,
11509 + enum dpsw_counter type,
11510 + u64 *counter)
11511 +{
11512 + struct mc_command cmd = { 0 };
11513 + struct dpsw_cmd_if_get_counter *cmd_params;
11514 + struct dpsw_rsp_if_get_counter *rsp_params;
11515 + int err;
11516 +
11517 + /* prepare command */
11518 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
11519 + cmd_flags,
11520 + token);
11521 + cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
11522 + cmd_params->if_id = cpu_to_le16(if_id);
11523 + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
11524 +
11525 + /* send command to mc*/
11526 + err = mc_send_command(mc_io, &cmd);
11527 + if (err)
11528 + return err;
11529 +
11530 + /* retrieve response parameters */
11531 + rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
11532 + *counter = le64_to_cpu(rsp_params->counter);
11533 +
11534 + return 0;
11535 +}
11536 +
11537 +/**
11538 + * dpsw_if_set_counter() - Set a specific counter of a particular interface
11539 + * @mc_io: Pointer to MC portal's I/O object
11540 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11541 + * @token: Token of DPSW object
11542 + * @if_id: Interface Identifier
11543 + * @type: Counter type
11544 + * @counter: New counter value
11545 + *
11546 + * Return: Completion status. '0' on Success; Error code otherwise.
11547 + */
11548 +int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
11549 + u32 cmd_flags,
11550 + u16 token,
11551 + u16 if_id,
11552 + enum dpsw_counter type,
11553 + u64 counter)
11554 +{
11555 + struct mc_command cmd = { 0 };
11556 + struct dpsw_cmd_if_set_counter *cmd_params;
11557 +
11558 + /* prepare command */
11559 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
11560 + cmd_flags,
11561 + token);
11562 + cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params;
11563 + cmd_params->if_id = cpu_to_le16(if_id);
11564 + cmd_params->counter = cpu_to_le64(counter);
11565 + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
11566 +
11567 + /* send command to mc*/
11568 + return mc_send_command(mc_io, &cmd);
11569 +}
11570 +
11571 +/**
11572 + * dpsw_if_set_tx_selection() - Map a variety of frame fields to
11573 + * traffic classes
11574 + * @mc_io: Pointer to MC portal's I/O object
11575 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11576 + * @token: Token of DPSW object
11577 + * @if_id: Interface Identifier
11578 + * @cfg: Traffic class mapping configuration
11579 + *
11580 + * Function is used for mapping a variety of frame fields (DSCP, PCP)
11581 + * to a traffic class. A traffic class is a number
11582 + * in the range from 0 to 7.
11583 + *
11584 + * Return: Completion status. '0' on Success; Error code otherwise.
11585 + */
11586 +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
11587 + u32 cmd_flags,
11588 + u16 token,
11589 + u16 if_id,
11590 + const struct dpsw_tx_selection_cfg *cfg)
11591 +{
11592 + struct dpsw_cmd_if_set_tx_selection *cmd_params;
11593 + struct mc_command cmd = { 0 };
11594 + int i;
11595 +
11596 + /* prepare command */
11597 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
11598 + cmd_flags,
11599 + token);
11600 + cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params;
11601 + cmd_params->if_id = cpu_to_le16(if_id);
11602 + dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR,
11603 + cfg->priority_selector);
11604 +
11605 + for (i = 0; i < 8; i++) {
11606 + cmd_params->tc_sched[i].delta_bandwidth =
11607 + cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
11608 + dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE,
11609 + cfg->tc_sched[i].mode);
11610 + cmd_params->tc_id[i] = cfg->tc_id[i];
11611 + }
11612 +
11613 + /* send command to mc*/
11614 + return mc_send_command(mc_io, &cmd);
11615 +}
11616 +
11617 +/**
11618 + * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
11619 + * @mc_io: Pointer to MC portal's I/O object
11620 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11621 + * @token: Token of DPSW object
11622 + * @if_id: Interface Identifier
11623 + * @cfg: Reflection configuration
11624 + *
11625 + * Return: Completion status. '0' on Success; Error code otherwise.
11626 + */
11627 +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
11628 + u32 cmd_flags,
11629 + u16 token,
11630 + u16 if_id,
11631 + const struct dpsw_reflection_cfg *cfg)
11632 +{
11633 + struct mc_command cmd = { 0 };
11634 + struct dpsw_cmd_if_reflection *cmd_params;
11635 +
11636 + /* prepare command */
11637 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
11638 + cmd_flags,
11639 + token);
11640 + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
11641 + cmd_params->if_id = cpu_to_le16(if_id);
11642 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
11643 + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
11644 +
11645 + /* send command to mc*/
11646 + return mc_send_command(mc_io, &cmd);
11647 +}
11648 +
11649 +/**
11650 + * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
11651 + * @mc_io: Pointer to MC portal's I/O object
11652 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11653 + * @token: Token of DPSW object
11654 + * @if_id: Interface Identifier
11655 + * @cfg: Reflection configuration
11656 + *
11657 + * Return: Completion status. '0' on Success; Error code otherwise.
11658 + */
11659 +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
11660 + u32 cmd_flags,
11661 + u16 token,
11662 + u16 if_id,
11663 + const struct dpsw_reflection_cfg *cfg)
11664 +{
11665 + struct mc_command cmd = { 0 };
11666 + struct dpsw_cmd_if_reflection *cmd_params;
11667 +
11668 + /* prepare command */
11669 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
11670 + cmd_flags,
11671 + token);
11672 + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
11673 + cmd_params->if_id = cpu_to_le16(if_id);
11674 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
11675 + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
11676 +
11677 + /* send command to mc*/
11678 + return mc_send_command(mc_io, &cmd);
11679 +}
11680 +
11681 +/**
11682 + * dpsw_if_set_flooding_metering() - Set flooding metering
11683 + * @mc_io: Pointer to MC portal's I/O object
11684 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11685 + * @token: Token of DPSW object
11686 + * @if_id: Interface Identifier
11687 + * @cfg: Metering parameters
11688 + *
11689 + * Return: Completion status. '0' on Success; Error code otherwise.
11690 + */
11691 +int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
11692 + u32 cmd_flags,
11693 + u16 token,
11694 + u16 if_id,
11695 + const struct dpsw_metering_cfg *cfg)
11696 +{
11697 + struct mc_command cmd = { 0 };
11698 + struct dpsw_cmd_if_set_flooding_metering *cmd_params;
11699 +
11700 + /* prepare command */
11701 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
11702 + cmd_flags,
11703 + token);
11704 + cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params;
11705 + cmd_params->if_id = cpu_to_le16(if_id);
11706 + dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
11707 + dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
11708 + cmd_params->cir = cpu_to_le32(cfg->cir);
11709 + cmd_params->eir = cpu_to_le32(cfg->eir);
11710 + cmd_params->cbs = cpu_to_le32(cfg->cbs);
11711 + cmd_params->ebs = cpu_to_le32(cfg->ebs);
11712 +
11713 + /* send command to mc*/
11714 + return mc_send_command(mc_io, &cmd);
11715 +}
11716 +
11717 +/**
11718 + * dpsw_if_set_metering() - Set interface metering
11719 + * @mc_io: Pointer to MC portal's I/O object
11720 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11721 + * @token: Token of DPSW object
11722 + * @if_id: Interface Identifier
11723 + * @tc_id: Traffic class ID
11724 + * @cfg: Metering parameters
11725 + *
11726 + * Return: Completion status. '0' on Success; Error code otherwise.
11727 + */
11728 +int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
11729 + u32 cmd_flags,
11730 + u16 token,
11731 + u16 if_id,
11732 + u8 tc_id,
11733 + const struct dpsw_metering_cfg *cfg)
11734 +{
11735 + struct mc_command cmd = { 0 };
11736 + struct dpsw_cmd_if_set_metering *cmd_params;
11737 +
11738 + /* prepare command */
11739 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
11740 + cmd_flags,
11741 + token);
11742 + cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params;
11743 + cmd_params->if_id = cpu_to_le16(if_id);
11744 + cmd_params->tc_id = tc_id;
11745 + dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
11746 + dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
11747 + cmd_params->cir = cpu_to_le32(cfg->cir);
11748 + cmd_params->eir = cpu_to_le32(cfg->eir);
11749 + cmd_params->cbs = cpu_to_le32(cfg->cbs);
11750 + cmd_params->ebs = cpu_to_le32(cfg->ebs);
11751 +
11752 + /* send command to mc*/
11753 + return mc_send_command(mc_io, &cmd);
11754 +}
11755 +
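A minimal usage sketch for the per-traffic-class metering call above, assuming an MC portal (mc_io) and DPSW token obtained beforehand; the interface number, traffic class and rate values are illustrative only, and the mode/units enumerators are left at 0 (their concrete names are declared in dpsw.h):

static int example_if_set_metering(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpsw_metering_cfg cfg = {
		.mode  = 0,      /* metering mode; see the enum in dpsw.h */
		.units = 0,      /* rate units; see the enum in dpsw.h */
		.cir   = 100000, /* committed information rate (illustrative) */
		.eir   = 50000,  /* excess information rate (illustrative) */
		.cbs   = 64000,  /* committed burst size (illustrative) */
		.ebs   = 64000,  /* excess burst size (illustrative) */
	};

	/* apply to traffic class 0 of interface 1 */
	return dpsw_if_set_metering(mc_io, 0, token, 1, 0, &cfg);
}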
11756 +/**
11757 + * dpsw_prepare_early_drop() - Prepare an early-drop configuration for an interface
11758 + * @cfg: Early-drop configuration
11759 + * @early_drop_buf: Zeroed 256-byte buffer, mapped for DMA after this call
11760 + *
11761 + * This function has to be called before dpsw_if_set_early_drop()
11762 + *
11763 + */
11764 +void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
11765 + u8 *early_drop_buf)
11766 +{
11767 + struct dpsw_prep_early_drop *ext_params;
11768 +
11769 + ext_params = (struct dpsw_prep_early_drop *)early_drop_buf;
11770 + dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode);
11771 + dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units);
11772 + ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold);
11773 + ext_params->green_drop_probability = cfg->green.drop_probability;
11774 + ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
11775 + ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
11776 + ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
11777 + ext_params->yellow_max_threshold =
11778 + cpu_to_le64(cfg->yellow.max_threshold);
11779 + ext_params->yellow_min_threshold =
11780 + cpu_to_le64(cfg->yellow.min_threshold);
11781 +}
11782 +
11783 +/**
11784 + * dpsw_if_set_early_drop() - Set interface traffic class early-drop
11785 + * configuration
11786 + * @mc_io: Pointer to MC portal's I/O object
11787 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11788 + * @token: Token of DPSW object
11789 + * @if_id: Interface Identifier
11790 + * @tc_id: Traffic class selection (0-7)
11791 + * @early_drop_iova: I/O virtual address of 64 bytes;
11792 + * Must be cacheline-aligned and DMA-able memory
11793 + *
11794 + * warning: Before calling this function, call dpsw_prepare_early_drop()
11795 + * to prepare the early_drop_iova parameter
11796 + *
11797 + * Return: '0' on Success; error code otherwise.
11798 + */
11799 +int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
11800 + u32 cmd_flags,
11801 + u16 token,
11802 + u16 if_id,
11803 + u8 tc_id,
11804 + u64 early_drop_iova)
11805 +{
11806 + struct mc_command cmd = { 0 };
11807 + struct dpsw_cmd_if_set_early_drop *cmd_params;
11808 +
11809 + /* prepare command */
11810 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
11811 + cmd_flags,
11812 + token);
11813 + cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params;
11814 + cmd_params->tc_id = tc_id;
11815 + cmd_params->if_id = cpu_to_le16(if_id);
11816 + cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
11817 +
11818 + /* send command to mc*/
11819 + return mc_send_command(mc_io, &cmd);
11820 +}
11821 +
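A sketch of the two-step early-drop flow documented above: fill the extension buffer in software, map it for DMA, then hand the IOVA to the MC. The 256-byte buffer size follows the dpsw_prepare_early_drop() kernel-doc; mc_io, token and the struct device used for DMA mapping are assumed to come from the caller, and <linux/slab.h> plus <linux/dma-mapping.h> are assumed to be included:

static int example_if_set_early_drop(struct fsl_mc_io *mc_io, u16 token,
				     struct device *dev, u16 if_id, u8 tc_id,
				     const struct dpsw_early_drop_cfg *cfg)
{
	dma_addr_t iova;
	u8 *buf;
	int err;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* serialize the configuration into the MC extension format */
	dpsw_prepare_early_drop(cfg, buf);

	iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, iova)) {
		kfree(buf);
		return -ENOMEM;
	}

	err = dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id, iova);

	dma_unmap_single(dev, iova, 256, DMA_TO_DEVICE);
	kfree(buf);
	return err;
}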
11822 +/**
11823 + * dpsw_add_custom_tpid() - Configure an additional Ethernet type (TPID) value
11824 + * @mc_io: Pointer to MC portal's I/O object
11825 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11826 + * @token: Token of DPSW object
11827 + * @cfg: Tag Protocol identifier
11828 + *
11829 + * Configures a distinct Ethernet type (TPID) value used to
11830 + * indicate a VLAN tag, in addition to the common
11831 + * TPID values 0x8100 and 0x88A8.
11832 + * Two additional TPIDs are supported.
11833 + *
11834 + * Return: Completion status. '0' on Success; Error code otherwise.
11835 + */
11836 +int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
11837 + u32 cmd_flags,
11838 + u16 token,
11839 + const struct dpsw_custom_tpid_cfg *cfg)
11840 +{
11841 + struct mc_command cmd = { 0 };
11842 + struct dpsw_cmd_custom_tpid *cmd_params;
11843 +
11844 + /* prepare command */
11845 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
11846 + cmd_flags,
11847 + token);
11848 + cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
11849 + cmd_params->tpid = cpu_to_le16(cfg->tpid);
11850 +
11851 + /* send command to mc*/
11852 + return mc_send_command(mc_io, &cmd);
11853 +}
11854 +
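For illustration only, assuming mc_io and token are already set up, a caller could register 0x9100 (a TPID commonly used for stacked VLANs) as one of the two additional custom TPIDs:

static int example_add_custom_tpid(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpsw_custom_tpid_cfg tpid_cfg = {
		.tpid = 0x9100, /* illustrative value; 0x8100/0x88A8 are built in */
	};

	return dpsw_add_custom_tpid(mc_io, 0, token, &tpid_cfg);
}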
11855 +/**
11856 + * dpsw_remove_custom_tpid() - Remove a previously configured custom TPID value
11857 + * @mc_io: Pointer to MC portal's I/O object
11858 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11859 + * @token: Token of DPSW object
11860 + * @cfg: Tag Protocol identifier
11861 + *
11862 + * Return: Completion status. '0' on Success; Error code otherwise.
11863 + */
11864 +int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
11865 + u32 cmd_flags,
11866 + u16 token,
11867 + const struct dpsw_custom_tpid_cfg *cfg)
11868 +{
11869 + struct mc_command cmd = { 0 };
11870 + struct dpsw_cmd_custom_tpid *cmd_params;
11871 +
11872 + /* prepare command */
11873 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
11874 + cmd_flags,
11875 + token);
11876 + cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
11877 + cmd_params->tpid = cpu_to_le16(cfg->tpid);
11878 +
11879 + /* send command to mc*/
11880 + return mc_send_command(mc_io, &cmd);
11881 +}
11882 +
11883 +/**
11884 + * dpsw_if_enable() - Enable Interface
11885 + * @mc_io: Pointer to MC portal's I/O object
11886 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11887 + * @token: Token of DPSW object
11888 + * @if_id: Interface Identifier
11889 + *
11890 + * Return: Completion status. '0' on Success; Error code otherwise.
11891 + */
11892 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
11893 + u32 cmd_flags,
11894 + u16 token,
11895 + u16 if_id)
11896 +{
11897 + struct mc_command cmd = { 0 };
11898 + struct dpsw_cmd_if *cmd_params;
11899 +
11900 + /* prepare command */
11901 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
11902 + cmd_flags,
11903 + token);
11904 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
11905 + cmd_params->if_id = cpu_to_le16(if_id);
11906 +
11907 + /* send command to mc*/
11908 + return mc_send_command(mc_io, &cmd);
11909 +}
11910 +
11911 +/**
11912 + * dpsw_if_disable() - Disable Interface
11913 + * @mc_io: Pointer to MC portal's I/O object
11914 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11915 + * @token: Token of DPSW object
11916 + * @if_id: Interface Identifier
11917 + *
11918 + * Return: Completion status. '0' on Success; Error code otherwise.
11919 + */
11920 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
11921 + u32 cmd_flags,
11922 + u16 token,
11923 + u16 if_id)
11924 +{
11925 + struct mc_command cmd = { 0 };
11926 + struct dpsw_cmd_if *cmd_params;
11927 +
11928 + /* prepare command */
11929 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
11930 + cmd_flags,
11931 + token);
11932 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
11933 + cmd_params->if_id = cpu_to_le16(if_id);
11934 +
11935 + /* send command to mc*/
11936 + return mc_send_command(mc_io, &cmd);
11937 +}
11938 +
11939 +/**
11940 + * dpsw_if_get_attributes() - Obtain attributes of an interface
11941 + * @mc_io: Pointer to MC portal's I/O object
11942 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11943 + * @token: Token of DPSW object
11944 + * @if_id: Interface Identifier
11945 + * @attr: Returned interface attributes
11946 + *
11947 + * Return: Completion status. '0' on Success; Error code otherwise.
11948 + */
11949 +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
11950 + u32 cmd_flags,
11951 + u16 token,
11952 + u16 if_id,
11953 + struct dpsw_if_attr *attr)
11954 +{
11955 + struct dpsw_rsp_if_get_attr *rsp_params;
11956 + struct dpsw_cmd_if *cmd_params;
11957 + struct mc_command cmd = { 0 };
11958 + int err;
11959 +
11960 + /* prepare command */
11961 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
11962 + cmd_flags,
11963 + token);
11964 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
11965 + cmd_params->if_id = cpu_to_le16(if_id);
11966 +
11967 + /* send command to mc*/
11968 + err = mc_send_command(mc_io, &cmd);
11969 + if (err)
11970 + return err;
11971 +
11972 + /* retrieve response parameters */
11973 + rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
11974 + attr->num_tcs = rsp_params->num_tcs;
11975 + attr->rate = le32_to_cpu(rsp_params->rate);
11976 + attr->options = le32_to_cpu(rsp_params->options);
11977 + attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
11978 + attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
11979 + ACCEPT_ALL_VLAN);
11980 + attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED);
11981 + attr->qdid = le16_to_cpu(rsp_params->qdid);
11982 +
11983 + return 0;
11984 +}
11985 +
11986 +/**
11987 + * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
11988 + * @mc_io: Pointer to MC portal's I/O object
11989 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11990 + * @token: Token of DPSW object
11991 + * @if_id: Interface Identifier
11992 + * @frame_length: Maximum Frame Length
11993 + *
11994 + * Return: Completion status. '0' on Success; Error code otherwise.
11995 + */
11996 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
11997 + u32 cmd_flags,
11998 + u16 token,
11999 + u16 if_id,
12000 + u16 frame_length)
12001 +{
12002 + struct mc_command cmd = { 0 };
12003 + struct dpsw_cmd_if_set_max_frame_length *cmd_params;
12004 +
12005 + /* prepare command */
12006 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
12007 + cmd_flags,
12008 + token);
12009 + cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
12010 + cmd_params->if_id = cpu_to_le16(if_id);
12011 + cmd_params->frame_length = cpu_to_le16(frame_length);
12012 +
12013 + /* send command to mc*/
12014 + return mc_send_command(mc_io, &cmd);
12015 +}
12016 +
12017 +/**
12018 + * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
12019 + * @mc_io: Pointer to MC portal's I/O object
12020 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12021 + * @token: Token of DPSW object
12022 + * @if_id: Interface Identifier
12023 + * @frame_length: Returned maximum Frame Length
12024 + *
12025 + * Return: Completion status. '0' on Success; Error code otherwise.
12026 + */
12027 +int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
12028 + u32 cmd_flags,
12029 + u16 token,
12030 + u16 if_id,
12031 + u16 *frame_length)
12032 +{
12033 + struct mc_command cmd = { 0 };
12034 + struct dpsw_cmd_if_get_max_frame_length *cmd_params;
12035 + struct dpsw_rsp_if_get_max_frame_length *rsp_params;
12036 + int err;
12037 +
12038 + /* prepare command */
12039 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
12040 + cmd_flags,
12041 + token);
12042 + cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params;
12043 + cmd_params->if_id = cpu_to_le16(if_id);
12044 +
12045 + /* send command to mc*/
12046 + err = mc_send_command(mc_io, &cmd);
12047 + if (err)
12048 + return err;
12049 +
12050 + rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params;
12051 + *frame_length = le16_to_cpu(rsp_params->frame_length);
12052 +
12053 + return 0;
12054 +}
12055 +
12056 +/**
12057 + * dpsw_vlan_add() - Add a new VLAN to the DPSW.
12058 + * @mc_io: Pointer to MC portal's I/O object
12059 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12060 + * @token: Token of DPSW object
12061 + * @vlan_id: VLAN Identifier
12062 + * @cfg: VLAN configuration
12063 + *
12064 + * Only VLAN ID and FDB ID are required parameters here.
12065 + * The 12-bit VLAN ID is defined in IEEE 802.1Q.
12066 + * Adding a duplicate VLAN ID is not allowed.
12067 + * An FDB ID can be shared across multiple VLANs. Shared learning
12068 + * is obtained by calling dpsw_vlan_add() for multiple VLAN IDs
12069 + * with the same fdb_id.
12070 + *
12071 + * Return: Completion status. '0' on Success; Error code otherwise.
12072 + */
12073 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
12074 + u32 cmd_flags,
12075 + u16 token,
12076 + u16 vlan_id,
12077 + const struct dpsw_vlan_cfg *cfg)
12078 +{
12079 + struct mc_command cmd = { 0 };
12080 + struct dpsw_vlan_add *cmd_params;
12081 +
12082 + /* prepare command */
12083 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
12084 + cmd_flags,
12085 + token);
12086 + cmd_params = (struct dpsw_vlan_add *)cmd.params;
12087 + cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
12088 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12089 +
12090 + /* send command to mc*/
12091 + return mc_send_command(mc_io, &cmd);
12092 +}
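A sketch of the shared-learning pattern mentioned in the kernel-doc above: two VLANs created against the same FDB, so MAC addresses learned in one VLAN are visible in the other. The VLAN IDs and fdb_id are placeholders supplied by the caller:

static int example_shared_learning(struct fsl_mc_io *mc_io, u16 token,
				   u16 fdb_id)
{
	struct dpsw_vlan_cfg vcfg = { .fdb_id = fdb_id };
	int err;

	err = dpsw_vlan_add(mc_io, 0, token, 100, &vcfg);
	if (err)
		return err;

	/* same fdb_id, so both VLANs share one learning database */
	return dpsw_vlan_add(mc_io, 0, token, 200, &vcfg);
}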
12093 +
12094 +/**
12095 + * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
12096 + * @mc_io: Pointer to MC portal's I/O object
12097 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12098 + * @token: Token of DPSW object
12099 + * @vlan_id: VLAN Identifier
12100 + * @cfg: Set of interfaces to add
12101 + *
12102 + * Only interfaces that do not yet belong to this VLAN can be
12103 + * added; otherwise an error is generated and the entire command
12104 + * is ignored. This function can be called multiple times, each
12105 + * time providing only the delta of interfaces to add.
12106 + *
12107 + * Return: Completion status. '0' on Success; Error code otherwise.
12108 + */
12109 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
12110 + u32 cmd_flags,
12111 + u16 token,
12112 + u16 vlan_id,
12113 + const struct dpsw_vlan_if_cfg *cfg)
12114 +{
12115 + struct mc_command cmd = { 0 };
12116 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12117 +
12118 + /* prepare command */
12119 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
12120 + cmd_flags,
12121 + token);
12122 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12123 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12124 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12125 +
12126 + /* send command to mc*/
12127 + return mc_send_command(mc_io, &cmd);
12128 +}
12129 +
12130 +/**
12131 + * dpsw_vlan_add_if_untagged() - Define a set of interfaces that should be
12132 + * transmitted as untagged.
12133 + * @mc_io: Pointer to MC portal's I/O object
12134 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12135 + * @token: Token of DPSW object
12136 + * @vlan_id: VLAN Identifier
12137 + * @cfg: Set of interfaces that should be transmitted as untagged
12138 + *
12139 + * These interfaces should already belong to this VLAN.
12140 + * By default all interfaces are transmitted as tagged.
12141 + * Providing a non-existent interface, or an interface that is
12142 + * already configured as untagged, generates an error and the
12143 + * entire command is ignored.
12144 + *
12145 + * Return: Completion status. '0' on Success; Error code otherwise.
12146 + */
12147 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
12148 + u32 cmd_flags,
12149 + u16 token,
12150 + u16 vlan_id,
12151 + const struct dpsw_vlan_if_cfg *cfg)
12152 +{
12153 + struct mc_command cmd = { 0 };
12154 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12155 +
12156 + /* prepare command */
12157 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
12158 + cmd_flags,
12159 + token);
12160 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12161 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12162 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12163 +
12164 + /* send command to mc*/
12165 + return mc_send_command(mc_io, &cmd);
12166 +}
12167 +
12168 +/**
12169 + * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
12170 + * included in flooding when a frame with an unknown unicast
12171 + * destination MAC arrives.
12172 + * @mc_io: Pointer to MC portal's I/O object
12173 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12174 + * @token: Token of DPSW object
12175 + * @vlan_id: VLAN Identifier
12176 + * @cfg: Set of interfaces that should be used for flooding
12177 + *
12178 + * These interfaces should belong to this VLAN. By default, all
12179 + * interfaces are included in the flooding list. Providing a
12180 + * non-existent interface, or an interface that is already in the
12181 + * flooding list, generates an error and the entire command is
12182 + * ignored.
12183 + *
12184 + * Return: Completion status. '0' on Success; Error code otherwise.
12185 + */
12186 +int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
12187 + u32 cmd_flags,
12188 + u16 token,
12189 + u16 vlan_id,
12190 + const struct dpsw_vlan_if_cfg *cfg)
12191 +{
12192 + struct mc_command cmd = { 0 };
12193 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12194 +
12195 + /* prepare command */
12196 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
12197 + cmd_flags,
12198 + token);
12199 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12200 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12201 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12202 +
12203 + /* send command to mc*/
12204 + return mc_send_command(mc_io, &cmd);
12205 +}
12206 +
12207 +/**
12208 + * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
12209 + * @mc_io: Pointer to MC portal's I/O object
12210 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12211 + * @token: Token of DPSW object
12212 + * @vlan_id: VLAN Identifier
12213 + * @cfg: Set of interfaces that should be removed
12214 + *
12215 + * Interfaces must belong to this VLAN, otherwise an error
12216 + * is returned and the command is ignored
12217 + *
12218 + * Return: Completion status. '0' on Success; Error code otherwise.
12219 + */
12220 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
12221 + u32 cmd_flags,
12222 + u16 token,
12223 + u16 vlan_id,
12224 + const struct dpsw_vlan_if_cfg *cfg)
12225 +{
12226 + struct mc_command cmd = { 0 };
12227 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12228 +
12229 + /* prepare command */
12230 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
12231 + cmd_flags,
12232 + token);
12233 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12234 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12235 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12236 +
12237 + /* send command to mc*/
12238 + return mc_send_command(mc_io, &cmd);
12239 +}
12240 +
12241 +/**
12242 + * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
12243 + * converted from being transmitted untagged to being transmitted tagged.
12244 + * @mc_io: Pointer to MC portal's I/O object
12245 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12246 + * @token: Token of DPSW object
12247 + * @vlan_id: VLAN Identifier
12248 + * @cfg: Set of interfaces that should be removed
12249 + *
12250 + * Interfaces provided to this API have to belong to this VLAN and
12251 + * be configured as untagged, otherwise an error is returned and
12252 + * the command is ignored
12253 + *
12254 + * Return: Completion status. '0' on Success; Error code otherwise.
12255 + */
12256 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
12257 + u32 cmd_flags,
12258 + u16 token,
12259 + u16 vlan_id,
12260 + const struct dpsw_vlan_if_cfg *cfg)
12261 +{
12262 + struct mc_command cmd = { 0 };
12263 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12264 +
12265 + /* prepare command */
12266 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
12267 + cmd_flags,
12268 + token);
12269 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12270 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12271 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12272 +
12273 + /* send command to mc*/
12274 + return mc_send_command(mc_io, &cmd);
12275 +}
12276 +
12277 +/**
12278 + * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
12279 + * removed from the flooding list.
12280 + * @mc_io: Pointer to MC portal's I/O object
12281 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12282 + * @token: Token of DPSW object
12283 + * @vlan_id: VLAN Identifier
12284 + * @cfg: Set of interfaces used for flooding
12285 + *
12286 + * Return: Completion status. '0' on Success; Error code otherwise.
12287 + */
12288 +int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
12289 + u32 cmd_flags,
12290 + u16 token,
12291 + u16 vlan_id,
12292 + const struct dpsw_vlan_if_cfg *cfg)
12293 +{
12294 + struct mc_command cmd = { 0 };
12295 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12296 +
12297 + /* prepare command */
12298 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
12299 + cmd_flags,
12300 + token);
12301 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12302 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12303 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12304 +
12305 + /* send command to mc*/
12306 + return mc_send_command(mc_io, &cmd);
12307 +}
12308 +
12309 +/**
12310 + * dpsw_vlan_remove() - Remove an entire VLAN
12311 + * @mc_io: Pointer to MC portal's I/O object
12312 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12313 + * @token: Token of DPSW object
12314 + * @vlan_id: VLAN Identifier
12315 + *
12316 + * Return: Completion status. '0' on Success; Error code otherwise.
12317 + */
12318 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
12319 + u32 cmd_flags,
12320 + u16 token,
12321 + u16 vlan_id)
12322 +{
12323 + struct mc_command cmd = { 0 };
12324 + struct dpsw_cmd_vlan_remove *cmd_params;
12325 +
12326 + /* prepare command */
12327 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
12328 + cmd_flags,
12329 + token);
12330 + cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
12331 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12332 +
12333 + /* send command to mc*/
12334 + return mc_send_command(mc_io, &cmd);
12335 +}
12336 +
12337 +/**
12338 + * dpsw_vlan_get_attributes() - Get VLAN attributes
12339 + * @mc_io: Pointer to MC portal's I/O object
12340 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12341 + * @token: Token of DPSW object
12342 + * @vlan_id: VLAN Identifier
12343 + * @attr: Returned DPSW attributes
12344 + *
12345 + * Return: Completion status. '0' on Success; Error code otherwise.
12346 + */
12347 +int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
12348 + u32 cmd_flags,
12349 + u16 token,
12350 + u16 vlan_id,
12351 + struct dpsw_vlan_attr *attr)
12352 +{
12353 + struct mc_command cmd = { 0 };
12354 + struct dpsw_cmd_vlan_get_attr *cmd_params;
12355 + struct dpsw_rsp_vlan_get_attr *rsp_params;
12356 + int err;
12357 +
12358 + /* prepare command */
12359 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
12360 + cmd_flags,
12361 + token);
12362 + cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
12363 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12364 +
12365 + /* send command to mc*/
12366 + err = mc_send_command(mc_io, &cmd);
12367 + if (err)
12368 + return err;
12369 +
12370 + /* retrieve response parameters */
12371 + rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
12372 + attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
12373 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12374 + attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
12375 + attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
12376 +
12377 + return 0;
12378 +}
12379 +
12380 +/**
12381 + * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
12382 + * @mc_io: Pointer to MC portal's I/O object
12383 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12384 + * @token: Token of DPSW object
12385 + * @vlan_id: VLAN Identifier
12386 + * @cfg: Returned set of interfaces belonging to this VLAN
12387 + *
12388 + * Return: Completion status. '0' on Success; Error code otherwise.
12389 + */
12390 +int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
12391 + u32 cmd_flags,
12392 + u16 token,
12393 + u16 vlan_id,
12394 + struct dpsw_vlan_if_cfg *cfg)
12395 +{
12396 + struct mc_command cmd = { 0 };
12397 + struct dpsw_cmd_vlan_get_if *cmd_params;
12398 + struct dpsw_rsp_vlan_get_if *rsp_params;
12399 + int err;
12400 +
12401 + /* prepare command */
12402 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
12403 + cmd_flags,
12404 + token);
12405 + cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
12406 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12407 +
12408 + /* send command to mc*/
12409 + err = mc_send_command(mc_io, &cmd);
12410 + if (err)
12411 + return err;
12412 +
12413 + /* retrieve response parameters */
12414 + rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
12415 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12416 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12417 +
12418 + return 0;
12419 +}
12420 +
12421 +/**
12422 + * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
12423 + * @mc_io: Pointer to MC portal's I/O object
12424 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12425 + * @token: Token of DPSW object
12426 + * @vlan_id: VLAN Identifier
12427 + * @cfg: Returned set of flooding interfaces
12428 + *
12429 + * Return: Completion status. '0' on Success; Error code otherwise.
12430 + */
12432 +int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
12433 + u32 cmd_flags,
12434 + u16 token,
12435 + u16 vlan_id,
12436 + struct dpsw_vlan_if_cfg *cfg)
12437 +{
12438 + struct mc_command cmd = { 0 };
12439 + struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
12440 + struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
12441 + int err;
12442 +
12443 + /* prepare command */
12444 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
12445 + cmd_flags,
12446 + token);
12447 + cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
12448 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12449 +
12450 + /* send command to mc*/
12451 + err = mc_send_command(mc_io, &cmd);
12452 + if (err)
12453 + return err;
12454 +
12455 + /* retrieve response parameters */
12456 + rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
12457 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12458 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12459 +
12460 + return 0;
12461 +}
12462 +
12463 +/**
12464 + * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
12465 + * untagged
12466 + * @mc_io: Pointer to MC portal's I/O object
12467 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12468 + * @token: Token of DPSW object
12469 + * @vlan_id: VLAN Identifier
12470 + * @cfg: Returned set of untagged interfaces
12471 + *
12472 + * Return: Completion status. '0' on Success; Error code otherwise.
12473 + */
12474 +int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
12475 + u32 cmd_flags,
12476 + u16 token,
12477 + u16 vlan_id,
12478 + struct dpsw_vlan_if_cfg *cfg)
12479 +{
12480 + struct mc_command cmd = { 0 };
12481 + struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
12482 + struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
12483 + int err;
12484 +
12485 + /* prepare command */
12486 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
12487 + cmd_flags,
12488 + token);
12489 + cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
12490 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12491 +
12492 + /* send command to mc*/
12493 + err = mc_send_command(mc_io, &cmd);
12494 + if (err)
12495 + return err;
12496 +
12497 + /* retrieve response parameters */
12498 + rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
12499 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12500 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12501 +
12502 + return 0;
12503 +}
12504 +
12505 +/**
12506 + * dpsw_fdb_add() - Add an FDB to the switch and return a handle to the
12507 + * FDB table for future reference
12508 + * @mc_io: Pointer to MC portal's I/O object
12509 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12510 + * @token: Token of DPSW object
12511 + * @fdb_id: Returned Forwarding Database Identifier
12512 + * @cfg: FDB Configuration
12513 + *
12514 + * Return: Completion status. '0' on Success; Error code otherwise.
12515 + */
12516 +int dpsw_fdb_add(struct fsl_mc_io *mc_io,
12517 + u32 cmd_flags,
12518 + u16 token,
12519 + u16 *fdb_id,
12520 + const struct dpsw_fdb_cfg *cfg)
12521 +{
12522 + struct mc_command cmd = { 0 };
12523 + struct dpsw_cmd_fdb_add *cmd_params;
12524 + struct dpsw_rsp_fdb_add *rsp_params;
12525 + int err;
12526 +
12527 + /* prepare command */
12528 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
12529 + cmd_flags,
12530 + token);
12531 + cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
12532 + cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
12533 + cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
12534 +
12535 + /* send command to mc*/
12536 + err = mc_send_command(mc_io, &cmd);
12537 + if (err)
12538 + return err;
12539 +
12540 + /* retrieve response parameters */
12541 + rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
12542 + *fdb_id = le16_to_cpu(rsp_params->fdb_id);
12543 +
12544 + return 0;
12545 +}
12546 +
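A sketch showing how the returned fdb_id is typically fed back into later calls, here dpsw_vlan_add(); the table size, aging time and VLAN ID are illustrative values rather than defaults taken from the driver:

static int example_fdb_backed_vlan(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpsw_fdb_cfg fdb_cfg = {
		.num_fdb_entries = 1024, /* illustrative table size */
		.fdb_aging_time  = 300,  /* illustrative aging time */
	};
	struct dpsw_vlan_cfg vlan_cfg;
	u16 fdb_id;
	int err;

	err = dpsw_fdb_add(mc_io, 0, token, &fdb_id, &fdb_cfg);
	if (err)
		return err;

	/* back VLAN 42 with the newly created FDB */
	vlan_cfg.fdb_id = fdb_id;
	return dpsw_vlan_add(mc_io, 0, token, 42, &vlan_cfg);
}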
12547 +/**
12548 + * dpsw_fdb_remove() - Remove FDB from switch
12549 + * @mc_io: Pointer to MC portal's I/O object
12550 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12551 + * @token: Token of DPSW object
12552 + * @fdb_id: Forwarding Database Identifier
12553 + *
12554 + * Return: Completion status. '0' on Success; Error code otherwise.
12555 + */
12556 +int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
12557 + u32 cmd_flags,
12558 + u16 token,
12559 + u16 fdb_id)
12560 +{
12561 + struct mc_command cmd = { 0 };
12562 + struct dpsw_cmd_fdb_remove *cmd_params;
12563 +
12564 + /* prepare command */
12565 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
12566 + cmd_flags,
12567 + token);
12568 + cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
12569 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12570 +
12571 + /* send command to mc*/
12572 + return mc_send_command(mc_io, &cmd);
12573 +}
12574 +
12575 +/**
12576 + * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
12577 + * @mc_io: Pointer to MC portal's I/O object
12578 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12579 + * @token: Token of DPSW object
12580 + * @fdb_id: Forwarding Database Identifier
12581 + * @cfg: Unicast entry configuration
12582 + *
12583 + * Return: Completion status. '0' on Success; Error code otherwise.
12584 + */
12585 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
12586 + u32 cmd_flags,
12587 + u16 token,
12588 + u16 fdb_id,
12589 + const struct dpsw_fdb_unicast_cfg *cfg)
12590 +{
12591 + struct mc_command cmd = { 0 };
12592 + struct dpsw_cmd_fdb_add_unicast *cmd_params;
12593 + int i;
12594 +
12595 + /* prepare command */
12596 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
12597 + cmd_flags,
12598 + token);
12599 + cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
12600 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12601 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
12602 + for (i = 0; i < 6; i++)
12603 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12604 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12605 +
12606 + /* send command to mc*/
12607 + return mc_send_command(mc_io, &cmd);
12608 +}
12609 +
12610 +/**
12611 + * dpsw_fdb_get_unicast() - Get a unicast entry from the MAC lookup table by its
12612 + * unicast Ethernet address
12613 + * @mc_io: Pointer to MC portal's I/O object
12614 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12615 + * @token: Token of DPSW object
12616 + * @fdb_id: Forwarding Database Identifier
12617 + * @cfg: Returned unicast entry configuration
12618 + *
12619 + * Return: Completion status. '0' on Success; Error code otherwise.
12620 + */
12621 +int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
12622 + u32 cmd_flags,
12623 + u16 token,
12624 + u16 fdb_id,
12625 + struct dpsw_fdb_unicast_cfg *cfg)
12626 +{
12627 + struct mc_command cmd = { 0 };
12628 + struct dpsw_cmd_fdb_get_unicast *cmd_params;
12629 + struct dpsw_rsp_fdb_get_unicast *rsp_params;
12630 + int err, i;
12631 +
12632 + /* prepare command */
12633 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
12634 + cmd_flags,
12635 + token);
12636 + cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
12637 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12638 + for (i = 0; i < 6; i++)
12639 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12640 +
12641 + /* send command to mc*/
12642 + err = mc_send_command(mc_io, &cmd);
12643 + if (err)
12644 + return err;
12645 +
12646 + /* retrieve response parameters */
12647 + rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
12648 + cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
12649 + cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
12650 +
12651 + return 0;
12652 +}
12653 +
12654 +/**
12655 + * dpsw_fdb_remove_unicast() - Remove a unicast entry from the MAC lookup table
12656 + * @mc_io: Pointer to MC portal's I/O object
12657 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12658 + * @token: Token of DPSW object
12659 + * @fdb_id: Forwarding Database Identifier
12660 + * @cfg: Unicast entry configuration
12661 + *
12662 + * Return: Completion status. '0' on Success; Error code otherwise.
12663 + */
12664 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
12665 + u32 cmd_flags,
12666 + u16 token,
12667 + u16 fdb_id,
12668 + const struct dpsw_fdb_unicast_cfg *cfg)
12669 +{
12670 + struct mc_command cmd = { 0 };
12671 + struct dpsw_cmd_fdb_remove_unicast *cmd_params;
12672 + int i;
12673 +
12674 + /* prepare command */
12675 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
12676 + cmd_flags,
12677 + token);
12678 + cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
12679 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12680 + for (i = 0; i < 6; i++)
12681 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12682 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
12683 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12684 +
12685 + /* send command to mc*/
12686 + return mc_send_command(mc_io, &cmd);
12687 +}
12688 +
12689 +/**
12690 + * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast group
12691 + * @mc_io: Pointer to MC portal's I/O object
12692 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12693 + * @token: Token of DPSW object
12694 + * @fdb_id: Forwarding Database Identifier
12695 + * @cfg: Multicast entry configuration
12696 + *
12697 + * If the group doesn't exist, it will be created.
12698 + * Only interfaces that do not yet belong to this multicast group
12699 + * can be added; otherwise an error is generated and the command
12700 + * is ignored.
12701 + * This function may be called multiple times, each time providing
12702 + * only the delta of interfaces to add.
12703 + *
12704 + * Return: Completion status. '0' on Success; Error code otherwise.
12705 + */
12706 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
12707 + u32 cmd_flags,
12708 + u16 token,
12709 + u16 fdb_id,
12710 + const struct dpsw_fdb_multicast_cfg *cfg)
12711 +{
12712 + struct mc_command cmd = { 0 };
12713 + struct dpsw_cmd_fdb_add_multicast *cmd_params;
12714 + int i;
12715 +
12716 + /* prepare command */
12717 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
12718 + cmd_flags,
12719 + token);
12720 + cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
12721 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12722 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
12723 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12724 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12725 + for (i = 0; i < 6; i++)
12726 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12727 +
12728 + /* send command to mc*/
12729 + return mc_send_command(mc_io, &cmd);
12730 +}
12731 +
12732 +/**
12733 + * dpsw_fdb_get_multicast() - Read a multicast group by its multicast Ethernet
12734 + * address.
12735 + * @mc_io: Pointer to MC portal's I/O object
12736 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12737 + * @token: Token of DPSW object
12738 + * @fdb_id: Forwarding Database Identifier
12739 + * @cfg: Returned multicast entry configuration
12740 + *
12741 + * Return: Completion status. '0' on Success; Error code otherwise.
12742 + */
12743 +int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
12744 + u32 cmd_flags,
12745 + u16 token,
12746 + u16 fdb_id,
12747 + struct dpsw_fdb_multicast_cfg *cfg)
12748 +{
12749 + struct mc_command cmd = { 0 };
12750 + struct dpsw_cmd_fdb_get_multicast *cmd_params;
12751 + struct dpsw_rsp_fdb_get_multicast *rsp_params;
12752 + int err, i;
12753 +
12754 + /* prepare command */
12755 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
12756 + cmd_flags,
12757 + token);
12758 + cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
12759 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12760 + for (i = 0; i < 6; i++)
12761 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12762 +
12763 + /* send command to mc*/
12764 + err = mc_send_command(mc_io, &cmd);
12765 + if (err)
12766 + return err;
12767 +
12768 + /* retrieve response parameters */
12769 + rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
12770 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12771 + cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
12772 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12773 +
12774 + return 0;
12775 +}
12776 +
12777 +/**
12778 + * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
12779 + * group.
12780 + * @mc_io: Pointer to MC portal's I/O object
12781 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12782 + * @token: Token of DPSW object
12783 + * @fdb_id: Forwarding Database Identifier
12784 + * @cfg: Multicast entry configuration
12785 + *
12786 + * Interfaces provided to this API have to exist in the group,
12787 + * otherwise an error is returned and the entire command is
12788 + * ignored. If no interfaces are left in the group,
12789 + * the entire group is deleted
12790 + *
12791 + * Return: Completion status. '0' on Success; Error code otherwise.
12792 + */
12793 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
12794 + u32 cmd_flags,
12795 + u16 token,
12796 + u16 fdb_id,
12797 + const struct dpsw_fdb_multicast_cfg *cfg)
12798 +{
12799 + struct mc_command cmd = { 0 };
12800 + struct dpsw_cmd_fdb_remove_multicast *cmd_params;
12801 + int i;
12802 +
12803 + /* prepare command */
12804 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
12805 + cmd_flags,
12806 + token);
12807 + cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params;
12808 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12809 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
12810 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12811 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12812 + for (i = 0; i < 6; i++)
12813 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12814 +
12815 + /* send command to mc*/
12816 + return mc_send_command(mc_io, &cmd);
12817 +}
12818 +
12819 +/**
12820 + * dpsw_fdb_set_learning_mode() - Define FDB learning mode
12821 + * @mc_io: Pointer to MC portal's I/O object
12822 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12823 + * @token: Token of DPSW object
12824 + * @fdb_id: Forwarding Database Identifier
12825 + * @mode: Learning mode
12826 + *
12827 + * Return: Completion status. '0' on Success; Error code otherwise.
12828 + */
12829 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
12830 + u32 cmd_flags,
12831 + u16 token,
12832 + u16 fdb_id,
12833 + enum dpsw_fdb_learning_mode mode)
12834 +{
12835 + struct mc_command cmd = { 0 };
12836 + struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
12837 +
12838 + /* prepare command */
12839 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
12840 + cmd_flags,
12841 + token);
12842 + cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
12843 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12844 + dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
12845 +
12846 + /* send command to mc*/
12847 + return mc_send_command(mc_io, &cmd);
12848 +}
12849 +
12850 +/**
12851 + * dpsw_fdb_get_attributes() - Get FDB attributes
12852 + * @mc_io: Pointer to MC portal's I/O object
12853 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12854 + * @token: Token of DPSW object
12855 + * @fdb_id: Forwarding Database Identifier
12856 + * @attr: Returned FDB attributes
12857 + *
12858 + * Return: Completion status. '0' on Success; Error code otherwise.
12859 + */
12860 +int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
12861 + u32 cmd_flags,
12862 + u16 token,
12863 + u16 fdb_id,
12864 + struct dpsw_fdb_attr *attr)
12865 +{
12866 + struct mc_command cmd = { 0 };
12867 + struct dpsw_cmd_fdb_get_attr *cmd_params;
12868 + struct dpsw_rsp_fdb_get_attr *rsp_params;
12869 + int err;
12870 +
12871 + /* prepare command */
12872 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
12873 + cmd_flags,
12874 + token);
12875 + cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params;
12876 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12877 +
12878 + /* send command to mc*/
12879 + err = mc_send_command(mc_io, &cmd);
12880 + if (err)
12881 + return err;
12882 +
12883 + /* retrieve response parameters */
12884 + rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params;
12885 + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
12886 + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
12887 + attr->learning_mode = dpsw_get_field(rsp_params->learning_mode,
12888 + LEARNING_MODE);
12889 + attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups);
12890 + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
12891 +
12892 + return 0;
12893 +}
12894 +
12895 +/**
12896 + * dpsw_acl_add() - Add an ACL to the L2 switch.
12897 + * @mc_io: Pointer to MC portal's I/O object
12898 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12899 + * @token: Token of DPSW object
12900 + * @acl_id: Returned ACL ID, for future reference
12901 + * @cfg: ACL configuration
12902 + *
12903 + * Create an Access Control List. Multiple ACLs can be created and
12904 + * co-exist in the L2 switch
12905 + *
12906 + * Return: '0' on Success; Error code otherwise.
12907 + */
12908 +int dpsw_acl_add(struct fsl_mc_io *mc_io,
12909 + u32 cmd_flags,
12910 + u16 token,
12911 + u16 *acl_id,
12912 + const struct dpsw_acl_cfg *cfg)
12913 +{
12914 + struct mc_command cmd = { 0 };
12915 + struct dpsw_cmd_acl_add *cmd_params;
12916 + struct dpsw_rsp_acl_add *rsp_params;
12917 + int err;
12918 +
12919 + /* prepare command */
12920 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
12921 + cmd_flags,
12922 + token);
12923 + cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
12924 + cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
12925 +
12926 + /* send command to mc*/
12927 + err = mc_send_command(mc_io, &cmd);
12928 + if (err)
12929 + return err;
12930 +
12931 + /* retrieve response parameters */
12932 + rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
12933 + *acl_id = le16_to_cpu(rsp_params->acl_id);
12934 +
12935 + return 0;
12936 +}
12937 +
12938 +/**
12939 + * dpsw_acl_remove() - Remove an ACL from the L2 switch.
12940 + * @mc_io: Pointer to MC portal's I/O object
12941 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12942 + * @token: Token of DPSW object
12943 + * @acl_id: ACL ID
12944 + *
12945 + * Return: '0' on Success; Error code otherwise.
12946 + */
12947 +int dpsw_acl_remove(struct fsl_mc_io *mc_io,
12948 + u32 cmd_flags,
12949 + u16 token,
12950 + u16 acl_id)
12951 +{
12952 + struct mc_command cmd = { 0 };
12953 + struct dpsw_cmd_acl_remove *cmd_params;
12954 +
12955 + /* prepare command */
12956 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
12957 + cmd_flags,
12958 + token);
12959 + cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
12960 + cmd_params->acl_id = cpu_to_le16(acl_id);
12961 +
12962 + /* send command to mc*/
12963 + return mc_send_command(mc_io, &cmd);
12964 +}
12965 +
12966 +/**
12967 + * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry configuration.
12968 + * @key: Key
12969 + * @entry_cfg_buf: Zeroed 256-byte buffer, mapped for DMA after this call
12970 + *
12971 + * This function has to be called before adding or removing an ACL entry
12972 + *
12973 + */
12974 +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
12975 + u8 *entry_cfg_buf)
12976 +{
12977 + struct dpsw_prep_acl_entry *ext_params;
12978 + int i;
12979 +
12980 + ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
12981 +
12982 + for (i = 0; i < 6; i++) {
12983 + ext_params->match_l2_dest_mac[i] =
12984 + key->match.l2_dest_mac[5 - i];
12985 + ext_params->match_l2_source_mac[i] =
12986 + key->match.l2_source_mac[5 - i];
12987 + ext_params->mask_l2_dest_mac[i] =
12988 + key->mask.l2_dest_mac[5 - i];
12989 + ext_params->mask_l2_source_mac[i] =
12990 + key->mask.l2_source_mac[5 - i];
12991 + }
12992 +
12993 + ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
12994 + ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
12995 + ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
12996 + ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
12997 + ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
12998 + ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
12999 + ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
13000 + ext_params->match_l3_dscp = key->match.l3_dscp;
13001 + ext_params->match_l4_source_port =
13002 + cpu_to_le16(key->match.l4_source_port);
13003 +
13004 + ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
13005 + ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
13006 + ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
13007 + ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
13008 + ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
13009 + ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
13010 + ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
13011 + ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
13012 + ext_params->mask_l3_dscp = key->mask.l3_dscp;
13013 + ext_params->match_l3_protocol = key->match.l3_protocol;
13014 + ext_params->mask_l3_protocol = key->mask.l3_protocol;
13015 +}
13016 +
13017 +/**
13018 + * dpsw_acl_add_entry() - Add an entry to the ACL.
13019 + * @mc_io: Pointer to MC portal's I/O object
13020 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13021 + * @token: Token of DPSW object
13022 + * @acl_id: ACL ID
13023 + * @cfg: Entry configuration
13024 + *
13025 + * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
13026 + *
13027 + * Return: '0' on Success; Error code otherwise.
13028 + */
13029 +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
13030 + u32 cmd_flags,
13031 + u16 token,
13032 + u16 acl_id,
13033 + const struct dpsw_acl_entry_cfg *cfg)
13034 +{
13035 + struct mc_command cmd = { 0 };
13036 + struct dpsw_cmd_acl_entry *cmd_params;
13037 +
13038 + /* prepare command */
13039 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
13040 + cmd_flags,
13041 + token);
13042 + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
13043 + cmd_params->acl_id = cpu_to_le16(acl_id);
13044 + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
13045 + cmd_params->precedence = cpu_to_le32(cfg->precedence);
13046 + dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
13047 + cfg->result.action);
13048 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
13049 +
13050 + /* send command to mc*/
13051 + return mc_send_command(mc_io, &cmd);
13052 +}
13053 +
13054 +/**
13055 + * dpsw_acl_remove_entry() - Remove an entry from the ACL.
13056 + * @mc_io: Pointer to MC portal's I/O object
13057 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13058 + * @token: Token of DPSW object
13059 + * @acl_id: ACL ID
13060 + * @cfg: Entry configuration
13061 + *
13062 + * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
13063 + *
13064 + * Return: '0' on Success; Error code otherwise.
13065 + */
13066 +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
13067 + u32 cmd_flags,
13068 + u16 token,
13069 + u16 acl_id,
13070 + const struct dpsw_acl_entry_cfg *cfg)
13071 +{
13072 + struct mc_command cmd = { 0 };
13073 + struct dpsw_cmd_acl_entry *cmd_params;
13074 +
13075 + /* prepare command */
13076 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
13077 + cmd_flags,
13078 + token);
13079 + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
13080 + cmd_params->acl_id = cpu_to_le16(acl_id);
13081 + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
13082 + cmd_params->precedence = cpu_to_le32(cfg->precedence);
13083 + dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
13084 + cfg->result.action);
13085 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
13086 +
13087 + /* send command to mc*/
13088 + return mc_send_command(mc_io, &cmd);
13089 +}
13090 +
13091 +/**
13092 + * dpsw_acl_add_if() - Associate one or more interfaces with an ACL.
13093 + * @mc_io: Pointer to MC portal's I/O object
13094 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13095 + * @token: Token of DPSW object
13096 + * @acl_id: ACL ID
13097 + * @cfg: Interfaces list
13098 + *
13099 + * Return: '0' on Success; Error code otherwise.
13100 + */
13101 +int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
13102 + u32 cmd_flags,
13103 + u16 token,
13104 + u16 acl_id,
13105 + const struct dpsw_acl_if_cfg *cfg)
13106 +{
13107 + struct mc_command cmd = { 0 };
13108 + struct dpsw_cmd_acl_if *cmd_params;
13109 +
13110 + /* prepare command */
13111 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
13112 + cmd_flags,
13113 + token);
13114 + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
13115 + cmd_params->acl_id = cpu_to_le16(acl_id);
13116 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13117 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13118 +
13119 + /* send command to mc*/
13120 + return mc_send_command(mc_io, &cmd);
13121 +}
13122 +
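A sketch of the full ACL flow built from the calls above: create the ACL, prepare a key/mask buffer with dpsw_acl_prepare_entry_cfg(), DMA-map it, add the entry, then bind the ACL to an interface. The match values, precedence and action numbers are illustrative, the action enumerators and the fixed-size if_id array are assumed to be declared in dpsw.h, and <linux/slab.h> plus <linux/dma-mapping.h> are assumed to be included:

static int example_acl_match_telnet(struct fsl_mc_io *mc_io, u16 token,
				    struct device *dev, u16 if_id)
{
	struct dpsw_acl_cfg acl_cfg = { .max_entries = 16 };
	struct dpsw_acl_entry_cfg entry_cfg = { 0 };
	struct dpsw_acl_if_cfg if_cfg = { .num_ifs = 1 };
	struct dpsw_acl_key key = { 0 };
	dma_addr_t key_iova;
	u8 *key_buf;
	u16 acl_id;
	int err;

	err = dpsw_acl_add(mc_io, 0, token, &acl_id, &acl_cfg);
	if (err)
		return err;

	/* match TCP traffic to destination port 23 */
	key.match.l3_protocol = 6;		/* IPPROTO_TCP */
	key.match.l4_dest_port = 23;
	key.mask.l3_protocol = 0xff;
	key.mask.l4_dest_port = 0xffff;

	key_buf = kzalloc(256, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;
	dpsw_acl_prepare_entry_cfg(&key, key_buf);

	key_iova = dma_map_single(dev, key_buf, 256, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		kfree(key_buf);
		return -ENOMEM;
	}

	entry_cfg.key_iova = key_iova;
	entry_cfg.precedence = 0;	/* illustrative precedence */
	entry_cfg.result.action = 0;	/* action enum value from dpsw.h */
	entry_cfg.result.if_id = 0;

	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry_cfg);

	dma_unmap_single(dev, key_iova, 256, DMA_TO_DEVICE);
	kfree(key_buf);
	if (err)
		return err;

	/* bind the ACL to one ingress interface */
	if_cfg.if_id[0] = if_id;
	return dpsw_acl_add_if(mc_io, 0, token, acl_id, &if_cfg);
}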
13123 +/**
13124 + * dpsw_acl_remove_if() - Disassociate one or more interfaces from an ACL.
13125 + * @mc_io: Pointer to MC portal's I/O object
13126 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13127 + * @token: Token of DPSW object
13128 + * @acl_id: ACL ID
13129 + * @cfg: Interfaces list
13130 + *
13131 + * Return: '0' on Success; Error code otherwise.
13132 + */
13133 +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
13134 + u32 cmd_flags,
13135 + u16 token,
13136 + u16 acl_id,
13137 + const struct dpsw_acl_if_cfg *cfg)
13138 +{
13139 + struct mc_command cmd = { 0 };
13140 + struct dpsw_cmd_acl_if *cmd_params;
13141 +
13142 + /* prepare command */
13143 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
13144 + cmd_flags,
13145 + token);
13146 + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
13147 + cmd_params->acl_id = cpu_to_le16(acl_id);
13148 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13149 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13150 +
13151 + /* send command to mc*/
13152 + return mc_send_command(mc_io, &cmd);
13153 +}
13154 +
13155 +/**
13156 + * dpsw_acl_get_attributes() - Get ACL attributes
13157 + * @mc_io: Pointer to MC portal's I/O object
13158 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13159 + * @token: Token of DPSW object
13160 + * @acl_id: ACL Identifier
13161 + * @attr: Returned ACL attributes
13162 + *
13163 + * Return: '0' on Success; Error code otherwise.
13164 + */
13165 +int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
13166 + u32 cmd_flags,
13167 + u16 token,
13168 + u16 acl_id,
13169 + struct dpsw_acl_attr *attr)
13170 +{
13171 + struct mc_command cmd = { 0 };
13172 + struct dpsw_cmd_acl_get_attr *cmd_params;
13173 + struct dpsw_rsp_acl_get_attr *rsp_params;
13174 + int err;
13175 +
13176 + /* prepare command */
13177 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
13178 + cmd_flags,
13179 + token);
13180 + cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
13181 + cmd_params->acl_id = cpu_to_le16(acl_id);
13182 +
13183 + /* send command to mc*/
13184 + err = mc_send_command(mc_io, &cmd);
13185 + if (err)
13186 + return err;
13187 +
13188 + /* retrieve response parameters */
13189 + rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
13190 + attr->max_entries = le16_to_cpu(rsp_params->max_entries);
13191 + attr->num_entries = le16_to_cpu(rsp_params->num_entries);
13192 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13193 +
13194 + return 0;
13195 +}
13196 +
13197 +/**
13198 + * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
13199 + * @mc_io: Pointer to MC portal's I/O object
13200 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13201 + * @token: Token of DPSW object
13202 + * @attr: Returned control interface attributes
13203 + *
13204 + * Return: '0' on Success; Error code otherwise.
13205 + */
13206 +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
13207 + u32 cmd_flags,
13208 + u16 token,
13209 + struct dpsw_ctrl_if_attr *attr)
13210 +{
13211 + struct mc_command cmd = { 0 };
13212 + struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
13213 + int err;
13214 +
13215 + /* prepare command */
13216 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
13217 + cmd_flags,
13218 + token);
13219 +
13220 + /* send command to mc*/
13221 + err = mc_send_command(mc_io, &cmd);
13222 + if (err)
13223 + return err;
13224 +
13225 + /* retrieve response parameters */
13226 + rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
13227 + attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
13228 + attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
13229 + attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
13230 +
13231 + return 0;
13232 +}
13233 +
13234 +/**
13235 + * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
13236 + * @mc_io: Pointer to MC portal's I/O object
13237 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13238 + * @token: Token of DPSW object
13239 + * @pools: Buffer pools configuration
13240 + *
13241 + * Return: '0' on Success; Error code otherwise.
13242 + */
13243 +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
13244 + u32 cmd_flags,
13245 + u16 token,
13246 + const struct dpsw_ctrl_if_pools_cfg *pools)
13247 +{
13248 + struct mc_command cmd = { 0 };
13249 + struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
13250 + int i;
13251 +
13252 + /* prepare command */
13253 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
13254 + cmd_flags,
13255 + token);
13256 + cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
13257 + cmd_params->num_dpbp = pools->num_dpbp;
13258 + for (i = 0; i < 8; i++) {
13259 + cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
13260 + i,
13261 + pools->pools[i].backup_pool);
13262 + cmd_params->buffer_size[i] =
13263 + cpu_to_le16(pools->pools[i].buffer_size);
13264 + cmd_params->dpbp_id[i] =
13265 + cpu_to_le32(pools->pools[i].dpbp_id);
13266 + }
13267 +
13268 + /* send command to mc*/
13269 + return mc_send_command(mc_io, &cmd);
13270 +}
13271 +
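A sketch of wiring a single DPBP buffer pool to the control interface; the dpbp_id is assumed to come from a previously probed DPBP object, and the buffer size is illustrative:

static int example_ctrl_if_pools(struct fsl_mc_io *mc_io, u16 token,
				 int dpbp_id)
{
	struct dpsw_ctrl_if_pools_cfg pools_cfg = { 0 };

	pools_cfg.num_dpbp = 1;
	pools_cfg.pools[0].dpbp_id = dpbp_id;
	pools_cfg.pools[0].buffer_size = 2048;	/* illustrative buffer size */
	pools_cfg.pools[0].backup_pool = 0;

	return dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools_cfg);
}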
13272 +/**
13273 + * dpsw_ctrl_if_enable() - Enable control interface
13274 + * @mc_io: Pointer to MC portal's I/O object
13275 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13276 + * @token: Token of DPSW object
13277 + *
13278 + * Return: '0' on Success; Error code otherwise.
13279 + */
13280 +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
13281 + u32 cmd_flags,
13282 + u16 token)
13283 +{
13284 + struct mc_command cmd = { 0 };
13285 +
13286 + /* prepare command */
13287 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
13288 + cmd_flags,
13289 + token);
13290 +
13291 +	/* send command to mc */
13292 + return mc_send_command(mc_io, &cmd);
13293 +}
13294 +
13295 +/**
13296 + * dpsw_ctrl_if_disable() - Disable control interface
13297 + * @mc_io: Pointer to MC portal's I/O object
13298 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13299 + * @token: Token of DPSW object
13300 + *
13301 + * Return: '0' on Success; Error code otherwise.
13302 + */
13303 +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
13304 + u32 cmd_flags,
13305 + u16 token)
13306 +{
13307 + struct mc_command cmd = { 0 };
13308 +
13309 + /* prepare command */
13310 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
13311 + cmd_flags,
13312 + token);
13313 +
13314 +	/* send command to mc */
13315 + return mc_send_command(mc_io, &cmd);
13316 +}
13317 +
13318 +/**
13319 + * dpsw_get_api_version() - Get Data Path Switch API version
13320 + * @mc_io: Pointer to MC portal's I/O object
13321 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13322 + * @major_ver: Major version of data path switch API
13323 + * @minor_ver: Minor version of data path switch API
13324 + *
13325 + * Return: '0' on Success; Error code otherwise.
13326 + */
13327 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
13328 + u32 cmd_flags,
13329 + u16 *major_ver,
13330 + u16 *minor_ver)
13331 +{
13332 + struct mc_command cmd = { 0 };
13333 + struct dpsw_rsp_get_api_version *rsp_params;
13334 + int err;
13335 +
13336 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
13337 + cmd_flags,
13338 + 0);
13339 +
13340 + err = mc_send_command(mc_io, &cmd);
13341 + if (err)
13342 + return err;
13343 +
13344 + rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
13345 + *major_ver = le16_to_cpu(rsp_params->version_major);
13346 + *minor_ver = le16_to_cpu(rsp_params->version_minor);
13347 +
13348 + return 0;
13349 +}
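/*
 * Illustrative sketch only (not part of the patch): probe code typically
 * reads the API version once and bails out on firmware older than it was
 * written for. The example_* helper is made up; the 8.0 floor mirrors the
 * DPSW_MIN_VER_MAJOR/MINOR values used by the switch driver further below.
 */
static int example_check_dpsw_version(struct fsl_mc_io *mc_io)
{
	u16 major, minor;
	int err;

	err = dpsw_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	if (major < 8)
		return -EOPNOTSUPP;	/* firmware too old for this driver */

	return 0;
}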
13350 --- /dev/null
13351 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
13352 @@ -0,0 +1,1269 @@
13353 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
13354 + *
13355 + * Redistribution and use in source and binary forms, with or without
13356 + * modification, are permitted provided that the following conditions are met:
13357 + * * Redistributions of source code must retain the above copyright
13358 + * notice, this list of conditions and the following disclaimer.
13359 + * * Redistributions in binary form must reproduce the above copyright
13360 + * notice, this list of conditions and the following disclaimer in the
13361 + * documentation and/or other materials provided with the distribution.
13362 + * * Neither the name of the above-listed copyright holders nor the
13363 + * names of any contributors may be used to endorse or promote products
13364 + * derived from this software without specific prior written permission.
13365 + *
13366 + *
13367 + * ALTERNATIVELY, this software may be distributed under the terms of the
13368 + * GNU General Public License ("GPL") as published by the Free Software
13369 + * Foundation, either version 2 of that License or (at your option) any
13370 + * later version.
13371 + *
13372 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13373 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13374 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13375 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13376 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13377 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13378 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13379 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13380 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13381 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13382 + * POSSIBILITY OF SUCH DAMAGE.
13383 + */
13384 +#ifndef __FSL_DPSW_H
13385 +#define __FSL_DPSW_H
13386 +
13387 +/* Data Path L2-Switch API
13388 + * Contains API for handling DPSW topology and functionality
13389 + */
13390 +
13391 +struct fsl_mc_io;
13392 +
13393 +/**
13394 + * DPSW general definitions
13395 + */
13396 +
13397 +/**
13398 + * Maximum number of traffic class priorities
13399 + */
13400 +#define DPSW_MAX_PRIORITIES 8
13401 +/**
13402 + * Maximum number of interfaces
13403 + */
13404 +#define DPSW_MAX_IF 64
13405 +
13406 +int dpsw_open(struct fsl_mc_io *mc_io,
13407 + u32 cmd_flags,
13408 + int dpsw_id,
13409 + u16 *token);
13410 +
13411 +int dpsw_close(struct fsl_mc_io *mc_io,
13412 + u32 cmd_flags,
13413 + u16 token);
13414 +
13415 +/**
13416 + * DPSW options
13417 + */
13418 +
13419 +/**
13420 + * Disable flooding
13421 + */
13422 +#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
13423 +/**
13424 + * Disable Multicast
13425 + */
13426 +#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
13427 +/**
13428 + * Disable control interface support
13429 + */
13430 +#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
13431 +/**
13432 + * Disable flooding metering
13433 + */
13434 +#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
13435 +/**
13436 + * Enable metering
13437 + */
13438 +#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
13439 +
13440 +/**
13441 + * enum dpsw_component_type - component type of a bridge
13442 + * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
13443 + * enterprise VLAN bridge or of a Provider Bridge used
13444 + * to process C-tagged frames
13445 + * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
13446 + * Provider Bridge
13447 + *
13448 + */
13449 +enum dpsw_component_type {
13450 + DPSW_COMPONENT_TYPE_C_VLAN = 0,
13451 + DPSW_COMPONENT_TYPE_S_VLAN
13452 +};
13453 +
13454 +/**
13455 + * struct dpsw_cfg - DPSW configuration
13456 + * @num_ifs: Number of external and internal interfaces
13457 + * @adv: Advanced parameters; default is all zeros;
13458 + * use this structure to change default settings
13459 + */
13460 +struct dpsw_cfg {
13461 + u16 num_ifs;
13462 + /**
13463 + * struct adv - Advanced parameters
13464 + * @options: Enable/Disable DPSW features (bitmap)
13465 + * @max_vlans: Maximum number of VLANs; 0 - indicates default 16
13466 + * @max_meters_per_if: Number of meters per interface
13467 + * @max_fdbs: Maximum number of FDBs; 0 - indicates default 16
13468 + * @max_fdb_entries: Number of FDB entries for default FDB table;
13469 + * 0 - indicates default 1024 entries.
13470 + * @fdb_aging_time: Default FDB aging time for default FDB table;
13471 + * 0 - indicates default 300 seconds
13472 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
13473 + * 0 - indicates default 32
13474 + * @component_type: Indicates the component type of this bridge
13475 + */
13476 + struct {
13477 + u64 options;
13478 + u16 max_vlans;
13479 + u8 max_meters_per_if;
13480 + u8 max_fdbs;
13481 + u16 max_fdb_entries;
13482 + u16 fdb_aging_time;
13483 + u16 max_fdb_mc_groups;
13484 + enum dpsw_component_type component_type;
13485 + } adv;
13486 +};
13487 +
13488 +int dpsw_create(struct fsl_mc_io *mc_io,
13489 + u16 dprc_token,
13490 + u32 cmd_flags,
13491 + const struct dpsw_cfg *cfg,
13492 + u32 *obj_id);
13493 +
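/*
 * Illustrative sketch only (not part of the patch): creating a 4-port switch
 * object. The example_* helper and the field values are made up; dprc_token
 * and mc_io are assumed to come from the fsl-mc bus probe, and every 'adv'
 * field left at zero falls back to the defaults documented above.
 */
static int example_create_dpsw(struct fsl_mc_io *mc_io, u16 dprc_token,
			       u32 *obj_id)
{
	struct dpsw_cfg cfg = {
		.num_ifs = 4,
		.adv = {
			.options = DPSW_OPT_CTRL_IF_DIS,
			.max_vlans = 0,		/* default: 16 */
			.max_fdb_entries = 0,	/* default: 1024 */
			.component_type = DPSW_COMPONENT_TYPE_C_VLAN,
		},
	};

	return dpsw_create(mc_io, dprc_token, 0, &cfg, obj_id);
}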
13494 +int dpsw_destroy(struct fsl_mc_io *mc_io,
13495 + u16 dprc_token,
13496 + u32 cmd_flags,
13497 + u32 object_id);
13498 +
13499 +int dpsw_enable(struct fsl_mc_io *mc_io,
13500 + u32 cmd_flags,
13501 + u16 token);
13502 +
13503 +int dpsw_disable(struct fsl_mc_io *mc_io,
13504 + u32 cmd_flags,
13505 + u16 token);
13506 +
13507 +int dpsw_is_enabled(struct fsl_mc_io *mc_io,
13508 + u32 cmd_flags,
13509 + u16 token,
13510 + int *en);
13511 +
13512 +int dpsw_reset(struct fsl_mc_io *mc_io,
13513 + u32 cmd_flags,
13514 + u16 token);
13515 +
13516 +/**
13517 + * DPSW IRQ Index and Events
13518 + */
13519 +
13520 +#define DPSW_IRQ_INDEX_IF 0x0000
13521 +#define DPSW_IRQ_INDEX_L2SW 0x0001
13522 +
13523 +/**
13524 + * IRQ event - Indicates that the link state changed
13525 + */
13526 +#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
13527 +
13528 +/**
13529 + * struct dpsw_irq_cfg - IRQ configuration
13530 + * @addr: Address that must be written to signal a message-based interrupt
13531 + * @val: Value to write into irq_addr address
13532 + * @irq_num: A user defined number associated with this IRQ
13533 + */
13534 +struct dpsw_irq_cfg {
13535 + u64 addr;
13536 + u32 val;
13537 + int irq_num;
13538 +};
13539 +
13540 +int dpsw_set_irq(struct fsl_mc_io *mc_io,
13541 + u32 cmd_flags,
13542 + u16 token,
13543 + u8 irq_index,
13544 + struct dpsw_irq_cfg *irq_cfg);
13545 +
13546 +int dpsw_get_irq(struct fsl_mc_io *mc_io,
13547 + u32 cmd_flags,
13548 + u16 token,
13549 + u8 irq_index,
13550 + int *type,
13551 + struct dpsw_irq_cfg *irq_cfg);
13552 +
13553 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
13554 + u32 cmd_flags,
13555 + u16 token,
13556 + u8 irq_index,
13557 + u8 en);
13558 +
13559 +int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
13560 + u32 cmd_flags,
13561 + u16 token,
13562 + u8 irq_index,
13563 + u8 *en);
13564 +
13565 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
13566 + u32 cmd_flags,
13567 + u16 token,
13568 + u8 irq_index,
13569 + u32 mask);
13570 +
13571 +int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
13572 + u32 cmd_flags,
13573 + u16 token,
13574 + u8 irq_index,
13575 + u32 *mask);
13576 +
13577 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
13578 + u32 cmd_flags,
13579 + u16 token,
13580 + u8 irq_index,
13581 + u32 *status);
13582 +
13583 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
13584 + u32 cmd_flags,
13585 + u16 token,
13586 + u8 irq_index,
13587 + u32 status);
13588 +
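/*
 * Illustrative sketch only (not part of the patch): arming the interface IRQ
 * so that only link state changes are reported, using one reasonable
 * mask -> clear -> enable order. The example_* helper is made up; mc_io and
 * token are assumed to come from dpsw_open().
 */
static int example_enable_link_irq(struct fsl_mc_io *mc_io, u16 token)
{
	int err;

	err = dpsw_set_irq_mask(mc_io, 0, token, DPSW_IRQ_INDEX_IF,
				DPSW_IRQ_EVENT_LINK_CHANGED);
	if (err)
		return err;

	err = dpsw_clear_irq_status(mc_io, 0, token, DPSW_IRQ_INDEX_IF,
				    0xffffffff);
	if (err)
		return err;

	return dpsw_set_irq_enable(mc_io, 0, token, DPSW_IRQ_INDEX_IF, 1);
}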
13589 +/**
13590 + * struct dpsw_attr - Structure representing DPSW attributes
13591 + * @id: DPSW object ID
13592 + * @options: Enable/Disable DPSW features
13593 + * @max_vlans: Maximum Number of VLANs
13594 + * @max_meters_per_if: Number of meters per interface
13595 + * @max_fdbs: Maximum Number of FDBs
13596 + * @max_fdb_entries: Number of FDB entries for default FDB table;
13597 + * 0 - indicates default 1024 entries.
13598 + * @fdb_aging_time: Default FDB aging time for default FDB table;
13599 + * 0 - indicates default 300 seconds
13600 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
13601 + * 0 - indicates default 32
13602 + * @mem_size: DPSW frame storage memory size
13603 + * @num_ifs: Number of interfaces
13604 + * @num_vlans: Current number of VLANs
13605 + * @num_fdbs: Current number of FDBs
13606 + * @component_type: Component type of this bridge
13607 + */
13608 +struct dpsw_attr {
13609 + int id;
13610 + u64 options;
13611 + u16 max_vlans;
13612 + u8 max_meters_per_if;
13613 + u8 max_fdbs;
13614 + u16 max_fdb_entries;
13615 + u16 fdb_aging_time;
13616 + u16 max_fdb_mc_groups;
13617 + u16 num_ifs;
13618 + u16 mem_size;
13619 + u16 num_vlans;
13620 + u8 num_fdbs;
13621 + enum dpsw_component_type component_type;
13622 +};
13623 +
13624 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
13625 + u32 cmd_flags,
13626 + u16 token,
13627 + struct dpsw_attr *attr);
13628 +
13629 +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
13630 + u32 cmd_flags,
13631 + u16 token,
13632 + u16 if_id);
13633 +
13634 +/**
13635 + * enum dpsw_action - Action selection for special/control frames
13636 + * @DPSW_ACTION_DROP: Drop frame
13637 + * @DPSW_ACTION_REDIRECT: Redirect frame to control port
13638 + */
13639 +enum dpsw_action {
13640 + DPSW_ACTION_DROP = 0,
13641 + DPSW_ACTION_REDIRECT = 1
13642 +};
13643 +
13644 +/**
13645 + * Enable auto-negotiation
13646 + */
13647 +#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
13648 +/**
13649 + * Enable half-duplex mode
13650 + */
13651 +#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
13652 +/**
13653 + * Enable pause frames
13654 + */
13655 +#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
13656 +/**
13657 + * Enable asymmetric pause frames
13658 + */
13659 +#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
13660 +
13661 +/**
13662 + * struct dpsw_link_cfg - Structure representing DPSW link configuration
13663 + * @rate: Rate
13664 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
13665 + */
13666 +struct dpsw_link_cfg {
13667 + u32 rate;
13668 + u64 options;
13669 +};
13670 +
13671 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
13672 + u32 cmd_flags,
13673 + u16 token,
13674 + u16 if_id,
13675 + struct dpsw_link_cfg *cfg);
13676 +/**
13677 + * struct dpsw_link_state - Structure representing DPSW link state
13678 + * @rate: Rate
13679 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
13680 + * @up: 0 - covers two cases: down and disconnected, 1 - up
13681 + */
13682 +struct dpsw_link_state {
13683 + u32 rate;
13684 + u64 options;
13685 + int up;
13686 +};
13687 +
13688 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
13689 + u32 cmd_flags,
13690 + u16 token,
13691 + u16 if_id,
13692 + struct dpsw_link_state *state);
13693 +
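/*
 * Illustrative sketch only (not part of the patch): requesting a 1000 Mbit/s
 * autonegotiated link with pause frames on one interface and reading the
 * state back. The example_* helper is made up and the rate unit (Mbit/s) is
 * an assumption, since the header only documents the field as "Rate".
 */
static int example_if_link(struct fsl_mc_io *mc_io, u16 token, u16 if_id)
{
	struct dpsw_link_cfg cfg = {
		.rate = 1000,
		.options = DPSW_LINK_OPT_AUTONEG | DPSW_LINK_OPT_PAUSE,
	};
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_set_link_cfg(mc_io, 0, token, if_id, &cfg);
	if (err)
		return err;

	err = dpsw_if_get_link_state(mc_io, 0, token, if_id, &state);
	if (err)
		return err;

	return state.up ? 0 : -ENETDOWN;
}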
13694 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
13695 + u32 cmd_flags,
13696 + u16 token,
13697 + u16 if_id,
13698 + int en);
13699 +
13700 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
13701 + u32 cmd_flags,
13702 + u16 token,
13703 + u16 if_id,
13704 + int en);
13705 +
13706 +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
13707 + u32 cmd_flags,
13708 + u16 token,
13709 + u16 if_id,
13710 + int en);
13711 +
13712 +/**
13713 + * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
13714 + * @pcp: Priority Code Point (PCP): a 3-bit field which refers
13715 + * to the IEEE 802.1p priority
13716 + * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
13717 + * separately or in conjunction with PCP to indicate frames
13718 + * eligible to be dropped in the presence of congestion
13719 + * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
13720 + * to which the frame belongs. The hexadecimal values
13721 + * of 0x000 and 0xFFF are reserved;
13722 + * all other values may be used as VLAN identifiers,
13723 + * allowing up to 4,094 VLANs
13724 + */
13725 +struct dpsw_tci_cfg {
13726 + u8 pcp;
13727 + u8 dei;
13728 + u16 vlan_id;
13729 +};
13730 +
13731 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
13732 + u32 cmd_flags,
13733 + u16 token,
13734 + u16 if_id,
13735 + const struct dpsw_tci_cfg *cfg);
13736 +
13737 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
13738 + u32 cmd_flags,
13739 + u16 token,
13740 + u16 if_id,
13741 + struct dpsw_tci_cfg *cfg);
13742 +
13743 +/**
13744 + * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
13745 + * @DPSW_STP_STATE_BLOCKING: Blocking state
13746 + * @DPSW_STP_STATE_LISTENING: Listening state
13747 + * @DPSW_STP_STATE_LEARNING: Learning state
13748 + * @DPSW_STP_STATE_FORWARDING: Forwarding state
13749 + *
13750 + */
13751 +enum dpsw_stp_state {
13752 + DPSW_STP_STATE_BLOCKING = 0,
13753 + DPSW_STP_STATE_LISTENING = 1,
13754 + DPSW_STP_STATE_LEARNING = 2,
13755 + DPSW_STP_STATE_FORWARDING = 3
13756 +};
13757 +
13758 +/**
13759 + * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
13760 + * @vlan_id: VLAN ID STP state
13761 + * @state: STP state
13762 + */
13763 +struct dpsw_stp_cfg {
13764 + u16 vlan_id;
13765 + enum dpsw_stp_state state;
13766 +};
13767 +
13768 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
13769 + u32 cmd_flags,
13770 + u16 token,
13771 + u16 if_id,
13772 + const struct dpsw_stp_cfg *cfg);
13773 +
13774 +/**
13775 + * enum dpsw_accepted_frames - Types of frames to accept
13776 + * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
13777 + * priority tagged frames
13778 + * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
13779 + * Priority-Tagged frames received on this interface.
13780 + *
13781 + */
13782 +enum dpsw_accepted_frames {
13783 + DPSW_ADMIT_ALL = 1,
13784 + DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
13785 +};
13786 +
13787 +/**
13788 + * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
13789 + * @type: Defines ingress accepted frames
13790 + * @unaccept_act: When a frame is not accepted, it may be discarded or
13791 + * redirected to control interface depending on this mode
13792 + */
13793 +struct dpsw_accepted_frames_cfg {
13794 + enum dpsw_accepted_frames type;
13795 + enum dpsw_action unaccept_act;
13796 +};
13797 +
13798 +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
13799 + u32 cmd_flags,
13800 + u16 token,
13801 + u16 if_id,
13802 + const struct dpsw_accepted_frames_cfg *cfg);
13803 +
13804 +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
13805 + u32 cmd_flags,
13806 + u16 token,
13807 + u16 if_id,
13808 + int accept_all);
13809 +
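/*
 * Illustrative sketch only (not part of the patch): admitting only
 * VLAN-tagged frames on an interface and redirecting everything else to the
 * control interface instead of silently dropping it. The example_* helper is
 * made up; mc_io/token are assumed to come from dpsw_open().
 */
static int example_admit_tagged_only(struct fsl_mc_io *mc_io, u16 token,
				     u16 if_id)
{
	struct dpsw_accepted_frames_cfg cfg = {
		.type = DPSW_ADMIT_ONLY_VLAN_TAGGED,
		.unaccept_act = DPSW_ACTION_REDIRECT,
	};

	return dpsw_if_set_accepted_frames(mc_io, 0, token, if_id, &cfg);
}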
13810 +/**
13811 + * enum dpsw_counter - Counters types
13812 + * @DPSW_CNT_ING_FRAME: Counts ingress frames
13813 + * @DPSW_CNT_ING_BYTE: Counts ingress bytes
13814 + * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
13815 + * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
13816 + * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
13817 + * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
13818 + * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
13819 + * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
13820 + * @DPSW_CNT_EGR_FRAME: Counts egress frames
13821 + * @DPSW_CNT_EGR_BYTE: Counts egress bytes
13822 + * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
13823 + * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
13824 + */
13825 +enum dpsw_counter {
13826 + DPSW_CNT_ING_FRAME = 0x0,
13827 + DPSW_CNT_ING_BYTE = 0x1,
13828 + DPSW_CNT_ING_FLTR_FRAME = 0x2,
13829 + DPSW_CNT_ING_FRAME_DISCARD = 0x3,
13830 + DPSW_CNT_ING_MCAST_FRAME = 0x4,
13831 + DPSW_CNT_ING_MCAST_BYTE = 0x5,
13832 + DPSW_CNT_ING_BCAST_FRAME = 0x6,
13833 + DPSW_CNT_ING_BCAST_BYTES = 0x7,
13834 + DPSW_CNT_EGR_FRAME = 0x8,
13835 + DPSW_CNT_EGR_BYTE = 0x9,
13836 + DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
13837 + DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
13838 +};
13839 +
13840 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
13841 + u32 cmd_flags,
13842 + u16 token,
13843 + u16 if_id,
13844 + enum dpsw_counter type,
13845 + u64 *counter);
13846 +
13847 +int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
13848 + u32 cmd_flags,
13849 + u16 token,
13850 + u16 if_id,
13851 + enum dpsw_counter type,
13852 + u64 counter);
13853 +
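/*
 * Illustrative sketch only (not part of the patch): pulling the ingress
 * frame and byte counters for one interface, e.g. to feed ndo_get_stats64.
 * The example_* helper is made up.
 */
static int example_read_if_stats(struct fsl_mc_io *mc_io, u16 token,
				 u16 if_id, u64 *frames, u64 *bytes)
{
	int err;

	err = dpsw_if_get_counter(mc_io, 0, token, if_id,
				  DPSW_CNT_ING_FRAME, frames);
	if (err)
		return err;

	return dpsw_if_get_counter(mc_io, 0, token, if_id,
				   DPSW_CNT_ING_BYTE, bytes);
}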
13854 +/**
13855 + * Maximum number of TC
13856 + */
13857 +#define DPSW_MAX_TC 8
13858 +
13859 +/**
13860 + * enum dpsw_priority_selector - User priority
13861 + * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
13862 + * refers to the IEEE 802.1p priority.
13863 + * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
13864 + * field from IP header
13865 + *
13866 + */
13867 +enum dpsw_priority_selector {
13868 + DPSW_UP_PCP = 0,
13869 + DPSW_UP_DSCP = 1
13870 +};
13871 +
13872 +/**
13873 + * enum dpsw_schedule_mode - Traffic classes scheduling
13874 + * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
13875 + * @DPSW_SCHED_WEIGHTED: schedule based on a token-bucket algorithm
13876 + */
13877 +enum dpsw_schedule_mode {
13878 + DPSW_SCHED_STRICT_PRIORITY,
13879 + DPSW_SCHED_WEIGHTED
13880 +};
13881 +
13882 +/**
13883 + * struct dpsw_tx_schedule_cfg - traffic class configuration
13884 + * @mode: Strict or weight-based scheduling
13885 + * @delta_bandwidth: Weighted bandwidth in the range 100 to 10000
13886 + */
13887 +struct dpsw_tx_schedule_cfg {
13888 + enum dpsw_schedule_mode mode;
13889 + u16 delta_bandwidth;
13890 +};
13891 +
13892 +/**
13893 + * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
13894 + * class configuration
13895 + * @priority_selector: Source for user priority regeneration
13896 + * @tc_id: The Regenerated User priority that the incoming
13897 + * User Priority is mapped to for this interface
13898 + * @tc_sched: Traffic classes configuration
13899 + */
13900 +struct dpsw_tx_selection_cfg {
13901 + enum dpsw_priority_selector priority_selector;
13902 + u8 tc_id[DPSW_MAX_PRIORITIES];
13903 + struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
13904 +};
13905 +
13906 +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
13907 + u32 cmd_flags,
13908 + u16 token,
13909 + u16 if_id,
13910 + const struct dpsw_tx_selection_cfg *cfg);
13911 +
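/*
 * Illustrative sketch only (not part of the patch): mapping the eight PCP
 * values one-to-one onto traffic classes and scheduling them all as strict
 * priority. The example_* helper is made up.
 */
static int example_set_tx_selection(struct fsl_mc_io *mc_io, u16 token,
				    u16 if_id)
{
	struct dpsw_tx_selection_cfg cfg = {
		.priority_selector = DPSW_UP_PCP,
	};
	int i;

	for (i = 0; i < DPSW_MAX_PRIORITIES; i++)
		cfg.tc_id[i] = i;

	for (i = 0; i < DPSW_MAX_TC; i++)
		cfg.tc_sched[i].mode = DPSW_SCHED_STRICT_PRIORITY;

	return dpsw_if_set_tx_selection(mc_io, 0, token, if_id, &cfg);
}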
13912 +/**
13913 + * enum dpsw_reflection_filter - Filter type for frames to reflect
13914 + * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
13915 + * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames that belong to
13916 + *		the particular VLAN defined by the vid parameter
13917 + *
13918 + */
13919 +enum dpsw_reflection_filter {
13920 + DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
13921 + DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
13922 +};
13923 +
13924 +/**
13925 + * struct dpsw_reflection_cfg - Structure representing reflection information
13926 + * @filter: Filter type for frames to reflect
13927 + * @vlan_id: VLAN ID to reflect; valid only when the filter type is
13928 + *		DPSW_REFLECTION_FILTER_INGRESS_VLAN
13929 + */
13930 +struct dpsw_reflection_cfg {
13931 + enum dpsw_reflection_filter filter;
13932 + u16 vlan_id;
13933 +};
13934 +
13935 +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
13936 + u32 cmd_flags,
13937 + u16 token,
13938 + u16 if_id,
13939 + const struct dpsw_reflection_cfg *cfg);
13940 +
13941 +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
13942 + u32 cmd_flags,
13943 + u16 token,
13944 + u16 if_id,
13945 + const struct dpsw_reflection_cfg *cfg);
13946 +
13947 +/**
13948 + * enum dpsw_metering_mode - Metering modes
13949 + * @DPSW_METERING_MODE_NONE: metering disabled
13950 + * @DPSW_METERING_MODE_RFC2698: RFC 2698
13951 + * @DPSW_METERING_MODE_RFC4115: RFC 4115
13952 + */
13953 +enum dpsw_metering_mode {
13954 + DPSW_METERING_MODE_NONE = 0,
13955 + DPSW_METERING_MODE_RFC2698,
13956 + DPSW_METERING_MODE_RFC4115
13957 +};
13958 +
13959 +/**
13960 + * enum dpsw_metering_unit - Metering count
13961 + * @DPSW_METERING_UNIT_BYTES: count bytes
13962 + * @DPSW_METERING_UNIT_FRAMES: count frames
13963 + */
13964 +enum dpsw_metering_unit {
13965 + DPSW_METERING_UNIT_BYTES = 0,
13966 + DPSW_METERING_UNIT_FRAMES
13967 +};
13968 +
13969 +/**
13970 + * struct dpsw_metering_cfg - Metering configuration
13971 + * @mode: metering modes
13972 + * @units: Bytes or frame units
13973 + * @cir: Committed information rate (CIR) in Kbits/s
13974 + * @eir: Peak information rate (PIR) in Kbit/s for RFC 2698;
13975 + *	 Excess information rate (EIR) in Kbit/s for RFC 4115
13976 + * @cbs: Committed burst size (CBS) in bytes
13977 + * @ebs: Peak burst size (PBS) in bytes for RFC 2698;
13978 + *	 Excess burst size (EBS) in bytes for RFC 4115
13979 + *
13980 + */
13981 +struct dpsw_metering_cfg {
13982 + enum dpsw_metering_mode mode;
13983 + enum dpsw_metering_unit units;
13984 + u32 cir;
13985 + u32 eir;
13986 + u32 cbs;
13987 + u32 ebs;
13988 +};
13989 +
13990 +int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
13991 + u32 cmd_flags,
13992 + u16 token,
13993 + u16 if_id,
13994 + const struct dpsw_metering_cfg *cfg);
13995 +
13996 +int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
13997 + u32 cmd_flags,
13998 + u16 token,
13999 + u16 if_id,
14000 + u8 tc_id,
14001 + const struct dpsw_metering_cfg *cfg);
14002 +
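/*
 * Illustrative sketch only (not part of the patch): an RFC 2698 two-rate
 * meter on one traffic class of one interface. The example_* helper and the
 * CIR/PIR/burst numbers are arbitrary illustration values, expressed in the
 * Kbit/s and byte units documented above.
 */
static int example_set_metering(struct fsl_mc_io *mc_io, u16 token,
				u16 if_id, u8 tc_id)
{
	struct dpsw_metering_cfg cfg = {
		.mode = DPSW_METERING_MODE_RFC2698,
		.units = DPSW_METERING_UNIT_BYTES,
		.cir = 100000,	/* committed: 100 Mbit/s */
		.eir = 200000,	/* peak:      200 Mbit/s */
		.cbs = 64000,	/* bytes */
		.ebs = 64000,	/* bytes */
	};

	return dpsw_if_set_metering(mc_io, 0, token, if_id, tc_id, &cfg);
}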
14003 +/**
14004 + * enum dpsw_early_drop_unit - DPSW early drop unit
14005 + * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
14006 + * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
14007 + */
14008 +enum dpsw_early_drop_unit {
14009 + DPSW_EARLY_DROP_UNIT_BYTE = 0,
14010 + DPSW_EARLY_DROP_UNIT_FRAMES
14011 +};
14012 +
14013 +/**
14014 + * enum dpsw_early_drop_mode - DPSW early drop mode
14015 + * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
14016 + * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
14017 + * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
14018 + */
14019 +enum dpsw_early_drop_mode {
14020 + DPSW_EARLY_DROP_MODE_NONE = 0,
14021 + DPSW_EARLY_DROP_MODE_TAIL,
14022 + DPSW_EARLY_DROP_MODE_WRED
14023 +};
14024 +
14025 +/**
14026 + * struct dpsw_wred_cfg - WRED configuration
14027 + * @max_threshold: maximum threshold at which packets may be discarded; above
14028 + *		this threshold all packets are discarded. Must be less than 2^39;
14029 + *		internally approximated as (x + 256) * 2^(y - 1) due to the HW
14030 + *		implementation.
14031 + * @min_threshold: minimum threshold that packets may be discarded at
14032 + * @drop_probability: probability that a packet will be discarded (1-100,
14033 + * associated with the maximum threshold)
14034 + */
14035 +struct dpsw_wred_cfg {
14036 + u64 min_threshold;
14037 + u64 max_threshold;
14038 + u8 drop_probability;
14039 +};
14040 +
14041 +/**
14042 + * struct dpsw_early_drop_cfg - early-drop configuration
14043 + * @drop_mode: drop mode
14044 + * @units: count units
14045 + * @yellow: WRED - 'yellow' configuration
14046 + * @green: WRED - 'green' configuration
14047 + * @tail_drop_threshold: tail drop threshold
14048 + */
14049 +struct dpsw_early_drop_cfg {
14050 + enum dpsw_early_drop_mode drop_mode;
14051 + enum dpsw_early_drop_unit units;
14052 + struct dpsw_wred_cfg yellow;
14053 + struct dpsw_wred_cfg green;
14054 + u32 tail_drop_threshold;
14055 +};
14056 +
14057 +void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
14058 + u8 *early_drop_buf);
14059 +
14060 +int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
14061 + u32 cmd_flags,
14062 + u16 token,
14063 + u16 if_id,
14064 + u8 tc_id,
14065 + u64 early_drop_iova);
14066 +
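/*
 * Illustrative sketch only (not part of the patch): WRED early drop is a
 * two-step configuration - serialise the parameters into a DMA-able buffer
 * with dpsw_prepare_early_drop(), then hand that buffer's IOVA to the MC.
 * The example_* helper and the thresholds are made up, and allocating and
 * mapping drop_buf (e.g. with dma_map_single()) is left to the caller.
 */
static int example_set_wred(struct fsl_mc_io *mc_io, u16 token, u16 if_id,
			    u8 tc_id, u8 *drop_buf, u64 drop_buf_iova)
{
	struct dpsw_early_drop_cfg cfg = {
		.drop_mode = DPSW_EARLY_DROP_MODE_WRED,
		.units = DPSW_EARLY_DROP_UNIT_BYTE,
		.green = {
			.min_threshold = 32 * 1024,
			.max_threshold = 64 * 1024,
			.drop_probability = 50,
		},
		.yellow = {
			.min_threshold = 16 * 1024,
			.max_threshold = 32 * 1024,
			.drop_probability = 75,
		},
	};

	dpsw_prepare_early_drop(&cfg, drop_buf);

	return dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id,
				      drop_buf_iova);
}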
14067 +/**
14068 + * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
14069 + * @tpid: An additional tag protocol identifier
14070 + */
14071 +struct dpsw_custom_tpid_cfg {
14072 + u16 tpid;
14073 +};
14074 +
14075 +int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
14076 + u32 cmd_flags,
14077 + u16 token,
14078 + const struct dpsw_custom_tpid_cfg *cfg);
14079 +
14080 +int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
14081 + u32 cmd_flags,
14082 + u16 token,
14083 + const struct dpsw_custom_tpid_cfg *cfg);
14084 +
14085 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
14086 + u32 cmd_flags,
14087 + u16 token,
14088 + u16 if_id);
14089 +
14090 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
14091 + u32 cmd_flags,
14092 + u16 token,
14093 + u16 if_id);
14094 +
14095 +/**
14096 + * struct dpsw_if_attr - Structure representing DPSW interface attributes
14097 + * @num_tcs: Number of traffic classes
14098 + * @rate: Transmit rate in bits per second
14099 + * @options: Interface configuration options (bitmap)
14100 + * @enabled: Indicates if interface is enabled
14101 + * @accept_all_vlan: The device discards/accepts incoming frames
14102 + * for VLANs that do not include this interface
14103 + * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
14104 + * discards untagged frames or priority-tagged frames received on
14105 + * this interface;
14106 + * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
14107 + * tagged frames received on this interface are accepted
14108 + * @qdid: control frames transmit qdid
14109 + */
14110 +struct dpsw_if_attr {
14111 + u8 num_tcs;
14112 + u32 rate;
14113 + u32 options;
14114 + int enabled;
14115 + int accept_all_vlan;
14116 + enum dpsw_accepted_frames admit_untagged;
14117 + u16 qdid;
14118 +};
14119 +
14120 +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
14121 + u32 cmd_flags,
14122 + u16 token,
14123 + u16 if_id,
14124 + struct dpsw_if_attr *attr);
14125 +
14126 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
14127 + u32 cmd_flags,
14128 + u16 token,
14129 + u16 if_id,
14130 + u16 frame_length);
14131 +
14132 +int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
14133 + u32 cmd_flags,
14134 + u16 token,
14135 + u16 if_id,
14136 + u16 *frame_length);
14137 +
14138 +/**
14139 + * struct dpsw_vlan_cfg - VLAN Configuration
14140 + * @fdb_id: Forwarding Database (FDB) ID
14141 + */
14142 +struct dpsw_vlan_cfg {
14143 + u16 fdb_id;
14144 +};
14145 +
14146 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
14147 + u32 cmd_flags,
14148 + u16 token,
14149 + u16 vlan_id,
14150 + const struct dpsw_vlan_cfg *cfg);
14151 +
14152 +/**
14153 + * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
14154 + * @num_ifs: The number of interfaces that are assigned to the egress
14155 + * list for this VLAN
14156 + * @if_id: The set of interfaces that are
14157 + * assigned to the egress list for this VLAN
14158 + */
14159 +struct dpsw_vlan_if_cfg {
14160 + u16 num_ifs;
14161 + u16 if_id[DPSW_MAX_IF];
14162 +};
14163 +
14164 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
14165 + u32 cmd_flags,
14166 + u16 token,
14167 + u16 vlan_id,
14168 + const struct dpsw_vlan_if_cfg *cfg);
14169 +
14170 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
14171 + u32 cmd_flags,
14172 + u16 token,
14173 + u16 vlan_id,
14174 + const struct dpsw_vlan_if_cfg *cfg);
14175 +
14176 +int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
14177 + u32 cmd_flags,
14178 + u16 token,
14179 + u16 vlan_id,
14180 + const struct dpsw_vlan_if_cfg *cfg);
14181 +
14182 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
14183 + u32 cmd_flags,
14184 + u16 token,
14185 + u16 vlan_id,
14186 + const struct dpsw_vlan_if_cfg *cfg);
14187 +
14188 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
14189 + u32 cmd_flags,
14190 + u16 token,
14191 + u16 vlan_id,
14192 + const struct dpsw_vlan_if_cfg *cfg);
14193 +
14194 +int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
14195 + u32 cmd_flags,
14196 + u16 token,
14197 + u16 vlan_id,
14198 + const struct dpsw_vlan_if_cfg *cfg);
14199 +
14200 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
14201 + u32 cmd_flags,
14202 + u16 token,
14203 + u16 vlan_id);
14204 +
14205 +/**
14206 + * struct dpsw_vlan_attr - VLAN attributes
14207 + * @fdb_id: Associated FDB ID
14208 + * @num_ifs: Number of interfaces
14209 + * @num_untagged_ifs: Number of untagged interfaces
14210 + * @num_flooding_ifs: Number of flooding interfaces
14211 + */
14212 +struct dpsw_vlan_attr {
14213 + u16 fdb_id;
14214 + u16 num_ifs;
14215 + u16 num_untagged_ifs;
14216 + u16 num_flooding_ifs;
14217 +};
14218 +
14219 +int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
14220 + u32 cmd_flags,
14221 + u16 token,
14222 + u16 vlan_id,
14223 + struct dpsw_vlan_attr *attr);
14224 +
14225 +int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
14226 + u32 cmd_flags,
14227 + u16 token,
14228 + u16 vlan_id,
14229 + struct dpsw_vlan_if_cfg *cfg);
14230 +
14231 +int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
14232 + u32 cmd_flags,
14233 + u16 token,
14234 + u16 vlan_id,
14235 + struct dpsw_vlan_if_cfg *cfg);
14236 +
14237 +int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
14238 + u32 cmd_flags,
14239 + u16 token,
14240 + u16 vlan_id,
14241 + struct dpsw_vlan_if_cfg *cfg);
14242 +
14243 +/**
14244 + * struct dpsw_fdb_cfg - FDB Configuration
14245 + * @num_fdb_entries: Number of FDB entries
14246 + * @fdb_aging_time: Aging time in seconds
14247 + */
14248 +struct dpsw_fdb_cfg {
14249 + u16 num_fdb_entries;
14250 + u16 fdb_aging_time;
14251 +};
14252 +
14253 +int dpsw_fdb_add(struct fsl_mc_io *mc_io,
14254 + u32 cmd_flags,
14255 + u16 token,
14256 + u16 *fdb_id,
14257 + const struct dpsw_fdb_cfg *cfg);
14258 +
14259 +int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
14260 + u32 cmd_flags,
14261 + u16 token,
14262 + u16 fdb_id);
14263 +
14264 +/**
14265 + * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
14266 + * @DPSW_FDB_ENTRY_STATIC: Static entry
14267 + * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
14268 + */
14269 +enum dpsw_fdb_entry_type {
14270 + DPSW_FDB_ENTRY_STATIC = 0,
14271 + DPSW_FDB_ENTRY_DINAMIC = 1
14272 +};
14273 +
14274 +/**
14275 + * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
14276 + * @type: Select static or dynamic entry
14277 + * @mac_addr: MAC address
14278 + * @if_egress: Egress interface ID
14279 + */
14280 +struct dpsw_fdb_unicast_cfg {
14281 + enum dpsw_fdb_entry_type type;
14282 + u8 mac_addr[6];
14283 + u16 if_egress;
14284 +};
14285 +
14286 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
14287 + u32 cmd_flags,
14288 + u16 token,
14289 + u16 fdb_id,
14290 + const struct dpsw_fdb_unicast_cfg *cfg);
14291 +
14292 +int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
14293 + u32 cmd_flags,
14294 + u16 token,
14295 + u16 fdb_id,
14296 + struct dpsw_fdb_unicast_cfg *cfg);
14297 +
14298 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
14299 + u32 cmd_flags,
14300 + u16 token,
14301 + u16 fdb_id,
14302 + const struct dpsw_fdb_unicast_cfg *cfg);
14303 +
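/*
 * Illustrative sketch only (not part of the patch): pinning a static unicast
 * FDB entry so that frames for 'addr' always leave through if_egress. The
 * example_* helper is made up; fdb_id 0 is used here because the switch
 * driver below also treats 0 as the default FDB.
 */
static int example_fdb_add_static(struct fsl_mc_io *mc_io, u16 token,
				  const u8 *addr, u16 if_egress)
{
	struct dpsw_fdb_unicast_cfg cfg = {
		.type = DPSW_FDB_ENTRY_STATIC,
		.if_egress = if_egress,
	};

	memcpy(cfg.mac_addr, addr, sizeof(cfg.mac_addr));

	return dpsw_fdb_add_unicast(mc_io, 0, token, 0 /* fdb_id */, &cfg);
}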
14304 +/**
14305 + * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
14306 + * @type: Select static or dynamic entry
14307 + * @mac_addr: MAC address
14308 + * @num_ifs: Number of external and internal interfaces
14309 + * @if_id: Egress interface IDs
14310 + */
14311 +struct dpsw_fdb_multicast_cfg {
14312 + enum dpsw_fdb_entry_type type;
14313 + u8 mac_addr[6];
14314 + u16 num_ifs;
14315 + u16 if_id[DPSW_MAX_IF];
14316 +};
14317 +
14318 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
14319 + u32 cmd_flags,
14320 + u16 token,
14321 + u16 fdb_id,
14322 + const struct dpsw_fdb_multicast_cfg *cfg);
14323 +
14324 +int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
14325 + u32 cmd_flags,
14326 + u16 token,
14327 + u16 fdb_id,
14328 + struct dpsw_fdb_multicast_cfg *cfg);
14329 +
14330 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
14331 + u32 cmd_flags,
14332 + u16 token,
14333 + u16 fdb_id,
14334 + const struct dpsw_fdb_multicast_cfg *cfg);
14335 +
14336 +/**
14337 + * enum dpsw_fdb_learning_mode - Auto-learning modes
14338 + * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
14339 + * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
14340 + * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
14341 + * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
14342 + *
14343 + *	NON-SECURE LEARNING
14344 + * SMAC found DMAC found CTLU Action
14345 + * v v Forward frame to
14346 + * 1. DMAC destination
14347 + * - v Forward frame to
14348 + * 1. DMAC destination
14349 + * 2. Control interface
14350 + * v - Forward frame to
14351 + * 1. Flooding list of interfaces
14352 + * - - Forward frame to
14353 + * 1. Flooding list of interfaces
14354 + * 2. Control interface
14355 + *	SECURE LEARNING
14356 + * SMAC found DMAC found CTLU Action
14357 + * v v Forward frame to
14358 + * 1. DMAC destination
14359 + * - v Forward frame to
14360 + * 1. Control interface
14361 + * v - Forward frame to
14362 + * 1. Flooding list of interfaces
14363 + * - - Forward frame to
14364 + * 1. Control interface
14365 + */
14366 +enum dpsw_fdb_learning_mode {
14367 + DPSW_FDB_LEARNING_MODE_DIS = 0,
14368 + DPSW_FDB_LEARNING_MODE_HW = 1,
14369 + DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
14370 + DPSW_FDB_LEARNING_MODE_SECURE = 3
14371 +};
14372 +
14373 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
14374 + u32 cmd_flags,
14375 + u16 token,
14376 + u16 fdb_id,
14377 + enum dpsw_fdb_learning_mode mode);
14378 +
14379 +/**
14380 + * struct dpsw_fdb_attr - FDB Attributes
14381 + * @max_fdb_entries: Number of FDB entries
14382 + * @fdb_aging_time: Aging time in seconds
14383 + * @learning_mode: Learning mode
14384 + * @num_fdb_mc_groups: Current number of multicast groups
14385 + * @max_fdb_mc_groups: Maximum number of multicast groups
14386 + */
14387 +struct dpsw_fdb_attr {
14388 + u16 max_fdb_entries;
14389 + u16 fdb_aging_time;
14390 + enum dpsw_fdb_learning_mode learning_mode;
14391 + u16 num_fdb_mc_groups;
14392 + u16 max_fdb_mc_groups;
14393 +};
14394 +
14395 +int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
14396 + u32 cmd_flags,
14397 + u16 token,
14398 + u16 fdb_id,
14399 + struct dpsw_fdb_attr *attr);
14400 +
14401 +/**
14402 + * struct dpsw_acl_cfg - ACL Configuration
14403 + * @max_entries: Number of FDB entries
14404 + */
14405 +struct dpsw_acl_cfg {
14406 + u16 max_entries;
14407 +};
14408 +
14409 +/**
14410 + * struct dpsw_acl_fields - ACL fields.
14411 + * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
14412 + * slow protocols, MVRP, STP
14413 + * @l2_source_mac: Source MAC address
14414 + * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
14415 + * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
14416 + * Q-in-Q, IPv4, IPv6, PPPoE
14417 + * @l2_pcp_dei: Layer 2 Priority Code Point (PCP) and Drop Eligibility
14418 + *	Indicator (DEI) fields from the VLAN tag
14418 + * @l2_vlan_id: layer 2 VLAN ID
14419 + * @l2_ether_type: layer 2 Ethernet type
14420 + * @l3_dscp: Layer 3 differentiated services code point
14421 + * @l3_protocol: Tells the network layer at the destination host which
14422 + *	protocol this packet belongs to. The following protocols are
14423 + * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
14424 + * (encapsulation), GRE, PTP
14425 + * @l3_source_ip: Source IPv4 IP
14426 + * @l3_dest_ip: Destination IPv4 IP
14427 + * @l4_source_port: Source TCP/UDP Port
14428 + * @l4_dest_port: Destination TCP/UDP Port
14429 + */
14430 +struct dpsw_acl_fields {
14431 + u8 l2_dest_mac[6];
14432 + u8 l2_source_mac[6];
14433 + u16 l2_tpid;
14434 + u8 l2_pcp_dei;
14435 + u16 l2_vlan_id;
14436 + u16 l2_ether_type;
14437 + u8 l3_dscp;
14438 + u8 l3_protocol;
14439 + u32 l3_source_ip;
14440 + u32 l3_dest_ip;
14441 + u16 l4_source_port;
14442 + u16 l4_dest_port;
14443 +};
14444 +
14445 +/**
14446 + * struct dpsw_acl_key - ACL key
14447 + * @match: Match fields
14448 + * @mask: Mask: b'1 - valid, b'0 don't care
14449 + */
14450 +struct dpsw_acl_key {
14451 + struct dpsw_acl_fields match;
14452 + struct dpsw_acl_fields mask;
14453 +};
14454 +
14455 +/**
14456 + * enum dpsw_acl_action
14457 + * @DPSW_ACL_ACTION_DROP: Drop frame
14458 + * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
14459 + * @DPSW_ACL_ACTION_ACCEPT: Accept frame
14460 + * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
14461 + */
14462 +enum dpsw_acl_action {
14463 + DPSW_ACL_ACTION_DROP,
14464 + DPSW_ACL_ACTION_REDIRECT,
14465 + DPSW_ACL_ACTION_ACCEPT,
14466 + DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
14467 +};
14468 +
14469 +/**
14470 + * struct dpsw_acl_result - ACL action
14471 + * @action: Action to be taken when an ACL entry is hit
14472 + * @if_id:  Interface ID to redirect the frame to; valid only if a redirect
14473 + *	    action is selected
14474 + */
14475 +struct dpsw_acl_result {
14476 + enum dpsw_acl_action action;
14477 + u16 if_id;
14478 +};
14479 +
14480 +/**
14481 + * struct dpsw_acl_entry_cfg - ACL entry
14482 + * @key_iova: I/O virtual address of DMA-able memory filled with key after call
14483 + * to dpsw_acl_prepare_entry_cfg()
14484 + * @result: Required action when entry hit occurs
14485 + * @precedence: Precedence inside the ACL; 0 is lowest. This priority cannot
14486 + *		change during the lifetime of a policy. It is the user's responsibility
14487 + *		to space the priorities to allow for subsequent rule additions.
14488 + */
14489 +struct dpsw_acl_entry_cfg {
14490 + u64 key_iova;
14491 + struct dpsw_acl_result result;
14492 + int precedence;
14493 +};
14494 +
14495 +int dpsw_acl_add(struct fsl_mc_io *mc_io,
14496 + u32 cmd_flags,
14497 + u16 token,
14498 + u16 *acl_id,
14499 + const struct dpsw_acl_cfg *cfg);
14500 +
14501 +int dpsw_acl_remove(struct fsl_mc_io *mc_io,
14502 + u32 cmd_flags,
14503 + u16 token,
14504 + u16 acl_id);
14505 +
14506 +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
14507 + uint8_t *entry_cfg_buf);
14508 +
14509 +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
14510 + u32 cmd_flags,
14511 + u16 token,
14512 + u16 acl_id,
14513 + const struct dpsw_acl_entry_cfg *cfg);
14514 +
14515 +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
14516 + u32 cmd_flags,
14517 + u16 token,
14518 + u16 acl_id,
14519 + const struct dpsw_acl_entry_cfg *cfg);
14520 +
14521 +/**
14522 + * struct dpsw_acl_if_cfg - List of interfaces to associate with ACL
14523 + * @num_ifs: Number of interfaces
14524 + * @if_id: List of interfaces
14525 + */
14526 +struct dpsw_acl_if_cfg {
14527 + u16 num_ifs;
14528 + u16 if_id[DPSW_MAX_IF];
14529 +};
14530 +
14531 +int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
14532 + u32 cmd_flags,
14533 + u16 token,
14534 + u16 acl_id,
14535 + const struct dpsw_acl_if_cfg *cfg);
14536 +
14537 +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
14538 + u32 cmd_flags,
14539 + u16 token,
14540 + u16 acl_id,
14541 + const struct dpsw_acl_if_cfg *cfg);
14542 +
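/*
 * Illustrative sketch only (not part of the patch): dropping all TCP frames
 * to destination port 23 on one interface. As with early drop, the key must
 * first be serialised into a DMA-able buffer whose IOVA is handed to the MC;
 * allocating and mapping key_buf is left to the caller. The example_* helper
 * and the entry numbers are made up.
 */
static int example_acl_block_telnet(struct fsl_mc_io *mc_io, u16 token,
				    u16 if_id, u8 *key_buf, u64 key_iova)
{
	struct dpsw_acl_cfg acl_cfg = { .max_entries = 16 };
	struct dpsw_acl_key key = {
		.match = { .l3_protocol = 6 /* TCP */, .l4_dest_port = 23 },
		.mask  = { .l3_protocol = 0xff, .l4_dest_port = 0xffff },
	};
	struct dpsw_acl_entry_cfg entry = {
		.key_iova = key_iova,
		.result = { .action = DPSW_ACL_ACTION_DROP },
		.precedence = 0,
	};
	struct dpsw_acl_if_cfg if_cfg = { .num_ifs = 1, .if_id[0] = if_id };
	u16 acl_id;
	int err;

	err = dpsw_acl_add(mc_io, 0, token, &acl_id, &acl_cfg);
	if (err)
		return err;

	dpsw_acl_prepare_entry_cfg(&key, key_buf);

	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry);
	if (err)
		return err;

	return dpsw_acl_add_if(mc_io, 0, token, acl_id, &if_cfg);
}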
14543 +/**
14544 + * struct dpsw_acl_attr - ACL Attributes
14545 + * @max_entries: Max number of ACL entries
14546 + * @num_entries: Number of used ACL entries
14547 + * @num_ifs: Number of interfaces associated with ACL
14548 + */
14549 +struct dpsw_acl_attr {
14550 + u16 max_entries;
14551 + u16 num_entries;
14552 + u16 num_ifs;
14553 +};
14554 +
14555 +int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
14556 + u32 cmd_flags,
14557 + u16 token,
14558 + u16 acl_id,
14559 + struct dpsw_acl_attr *attr);
14560 +/**
14561 + * struct dpsw_ctrl_if_attr - Control interface attributes
14562 + * @rx_fqid: Receive FQID
14563 + * @rx_err_fqid: Receive error FQID
14564 + * @tx_err_conf_fqid: Transmit error and confirmation FQID
14565 + */
14566 +struct dpsw_ctrl_if_attr {
14567 + u32 rx_fqid;
14568 + u32 rx_err_fqid;
14569 + u32 tx_err_conf_fqid;
14570 +};
14571 +
14572 +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
14573 + u32 cmd_flags,
14574 + u16 token,
14575 + struct dpsw_ctrl_if_attr *attr);
14576 +
14577 +/**
14578 + * Maximum number of DPBP
14579 + */
14580 +#define DPSW_MAX_DPBP 8
14581 +
14582 +/**
14583 + * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
14584 + * @num_dpbp: Number of DPBPs
14585 + * @pools: Array of buffer pools parameters; The number of valid entries
14586 + * must match 'num_dpbp' value
14587 + */
14588 +struct dpsw_ctrl_if_pools_cfg {
14589 + u8 num_dpbp;
14590 + /**
14591 + * struct pools - Buffer pools parameters
14592 + * @dpbp_id: DPBP object ID
14593 + * @buffer_size: Buffer size
14594 + * @backup_pool: Backup pool
14595 + */
14596 + struct {
14597 + int dpbp_id;
14598 + u16 buffer_size;
14599 + int backup_pool;
14600 + } pools[DPSW_MAX_DPBP];
14601 +};
14602 +
14603 +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
14604 + u32 cmd_flags,
14605 + u16 token,
14606 + const struct dpsw_ctrl_if_pools_cfg *cfg);
14607 +
14608 +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
14609 + u32 cmd_flags,
14610 + u16 token);
14611 +
14612 +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
14613 + u32 cmd_flags,
14614 + u16 token);
14615 +
14616 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
14617 + u32 cmd_flags,
14618 + u16 *major_ver,
14619 + u16 *minor_ver);
14620 +
14621 +#endif /* __FSL_DPSW_H */
14622 --- /dev/null
14623 +++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
14624 @@ -0,0 +1,1857 @@
14625 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
14626 + *
14627 + * Redistribution and use in source and binary forms, with or without
14628 + * modification, are permitted provided that the following conditions are met:
14629 + * * Redistributions of source code must retain the above copyright
14630 + * notice, this list of conditions and the following disclaimer.
14631 + * * Redistributions in binary form must reproduce the above copyright
14632 + * notice, this list of conditions and the following disclaimer in the
14633 + * documentation and/or other materials provided with the distribution.
14634 + * * Neither the name of Freescale Semiconductor nor the
14635 + * names of its contributors may be used to endorse or promote products
14636 + * derived from this software without specific prior written permission.
14637 + *
14638 + *
14639 + * ALTERNATIVELY, this software may be distributed under the terms of the
14640 + * GNU General Public License ("GPL") as published by the Free Software
14641 + * Foundation, either version 2 of that License or (at your option) any
14642 + * later version.
14643 + *
14644 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
14645 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
14646 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
14647 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
14648 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
14649 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
14650 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
14651 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
14652 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
14653 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14654 + */
14655 +
14656 +#include <linux/module.h>
14657 +#include <linux/msi.h>
14658 +
14659 +#include <linux/netdevice.h>
14660 +#include <linux/etherdevice.h>
14661 +#include <linux/rtnetlink.h>
14662 +#include <linux/if_vlan.h>
14663 +
14664 +#include <uapi/linux/if_bridge.h>
14665 +#include <net/netlink.h>
14666 +
14667 +#include "../../fsl-mc/include/mc.h"
14668 +#include "dpsw.h"
14669 +#include "dpsw-cmd.h"
14670 +
14671 +static const char ethsw_drv_version[] = "0.1";
14672 +
14673 +/* Minimal supported DPSW version */
14674 +#define DPSW_MIN_VER_MAJOR 8
14675 +#define DPSW_MIN_VER_MINOR 0
14676 +
14677 +/* IRQ index */
14678 +#define DPSW_MAX_IRQ_NUM 2
14679 +
14680 +#define ETHSW_VLAN_MEMBER 1
14681 +#define ETHSW_VLAN_UNTAGGED 2
14682 +#define ETHSW_VLAN_PVID 4
14683 +#define ETHSW_VLAN_GLOBAL 8
14684 +
14685 +/* Maximum Frame Length supported by HW (currently 10k) */
14686 +#define DPAA2_MFL (10 * 1024)
14687 +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
14688 +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
14689 +
14690 +struct ethsw_port_priv {
14691 + struct net_device *netdev;
14692 + struct list_head list;
14693 + u16 port_index;
14694 + struct ethsw_dev_priv *ethsw_priv;
14695 + u8 stp_state;
14696 +
14697 + char vlans[VLAN_VID_MASK + 1];
14698 +
14699 +};
14700 +
14701 +struct ethsw_dev_priv {
14702 + struct net_device *netdev;
14703 + struct fsl_mc_io *mc_io;
14704 + u16 dpsw_handle;
14705 + struct dpsw_attr sw_attr;
14706 + int dev_id;
14707 +	/* TODO: redundant, we can use the slave dev list */
14708 + struct list_head port_list;
14709 +
14710 + bool flood;
14711 + bool learning;
14712 +
14713 + char vlans[VLAN_VID_MASK + 1];
14714 +};
14715 +
14716 +static int ethsw_port_stop(struct net_device *netdev);
14717 +static int ethsw_port_open(struct net_device *netdev);
14718 +
14719 +static inline void __get_priv(struct net_device *netdev,
14720 + struct ethsw_dev_priv **priv,
14721 + struct ethsw_port_priv **port_priv)
14722 +{
14723 + struct ethsw_dev_priv *_priv = NULL;
14724 + struct ethsw_port_priv *_port_priv = NULL;
14725 +
14726 + if (netdev->flags & IFF_MASTER) {
14727 + _priv = netdev_priv(netdev);
14728 + } else {
14729 + _port_priv = netdev_priv(netdev);
14730 + _priv = _port_priv->ethsw_priv;
14731 + }
14732 +
14733 + if (priv)
14734 + *priv = _priv;
14735 + if (port_priv)
14736 + *port_priv = _port_priv;
14737 +}
14738 +
14739 +/* -------------------------------------------------------------------------- */
14740 +/* ethsw netdevice ops */
14741 +
14742 +static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
14743 +{
14744 + /* we don't support I/O for now, drop the frame */
14745 + dev_kfree_skb_any(skb);
14746 + return NETDEV_TX_OK;
14747 +}
14748 +
14749 +static int ethsw_open(struct net_device *netdev)
14750 +{
14751 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14752 + struct list_head *pos;
14753 + struct ethsw_port_priv *port_priv = NULL;
14754 + int err;
14755 +
14756 + err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
14757 + if (err) {
14758 + netdev_err(netdev, "dpsw_enable err %d\n", err);
14759 + return err;
14760 + }
14761 +
14762 + list_for_each(pos, &priv->port_list) {
14763 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
14764 + err = dev_open(port_priv->netdev);
14765 + if (err)
14766 + netdev_err(port_priv->netdev, "dev_open err %d\n", err);
14767 + }
14768 +
14769 + return 0;
14770 +}
14771 +
14772 +static int ethsw_stop(struct net_device *netdev)
14773 +{
14774 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14775 + struct list_head *pos;
14776 + struct ethsw_port_priv *port_priv = NULL;
14777 + int err;
14778 +
14779 + err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
14780 + if (err) {
14781 + netdev_err(netdev, "dpsw_disable err %d\n", err);
14782 + return err;
14783 + }
14784 +
14785 + list_for_each(pos, &priv->port_list) {
14786 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
14787 + err = dev_close(port_priv->netdev);
14788 + if (err)
14789 + netdev_err(port_priv->netdev,
14790 + "dev_close err %d\n", err);
14791 + }
14792 +
14793 + return 0;
14794 +}
14795 +
14796 +static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
14797 +{
14798 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14799 + int err;
14800 +
14801 + struct dpsw_vlan_cfg vcfg = {
14802 + /* TODO: add support for VLAN private FDBs */
14803 + .fdb_id = 0,
14804 + };
14805 + if (priv->vlans[vid]) {
14806 + netdev_err(netdev, "VLAN already configured\n");
14807 + return -EEXIST;
14808 + }
14809 +
14810 + err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
14811 + if (err) {
14812 + netdev_err(netdev, "dpsw_vlan_add err %d\n", err);
14813 + return err;
14814 + }
14815 + priv->vlans[vid] = ETHSW_VLAN_MEMBER;
14816 +
14817 + return 0;
14818 +}
14819 +
14820 +static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags)
14821 +{
14822 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
14823 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
14824 + int err;
14825 +
14826 + struct dpsw_vlan_if_cfg vcfg = {
14827 + .num_ifs = 1,
14828 + .if_id[0] = port_priv->port_index,
14829 + };
14830 +
14831 + if (port_priv->vlans[vid]) {
14832 + netdev_err(netdev, "VLAN already configured\n");
14833 + return -EEXIST;
14834 + }
14835 +
14836 + if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) {
14837 + netdev_err(netdev, "interface must be down to change PVID!\n");
14838 + return -EBUSY;
14839 + }
14840 +
14841 + err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
14842 + if (err) {
14843 + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
14844 + return err;
14845 + }
14846 + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
14847 +
14848 + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
14849 + err = dpsw_vlan_add_if_untagged(priv->mc_io, 0,
14850 + priv->dpsw_handle, vid, &vcfg);
14851 + if (err) {
14852 + netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n",
14853 + err);
14854 + return err;
14855 + }
14856 + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
14857 + }
14858 +
14859 + if (flags & BRIDGE_VLAN_INFO_PVID) {
14860 + struct dpsw_tci_cfg tci_cfg = {
14861 + /* TODO: at least add better defaults if these cannot
14862 + * be configured
14863 + */
14864 + .pcp = 0,
14865 + .dei = 0,
14866 + .vlan_id = vid,
14867 + };
14868 +
14869 + err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle,
14870 + port_priv->port_index, &tci_cfg);
14871 + if (err) {
14872 + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
14873 + return err;
14874 + }
14875 + port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
14876 + }
14877 +
14878 + return 0;
14879 +}
14880 +
14881 +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
14882 + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
14883 + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
14884 + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
14885 + .len = sizeof(struct bridge_vlan_info), },
14886 +};
14887 +
14888 +static int ethsw_setlink_af_spec(struct net_device *netdev,
14889 + struct nlattr **tb)
14890 +{
14891 + struct bridge_vlan_info *vinfo;
14892 + struct ethsw_dev_priv *priv = NULL;
14893 + struct ethsw_port_priv *port_priv = NULL;
14894 + int err = 0;
14895 +
14896 + if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
14897 + netdev_err(netdev, "no VLAN INFO in nlmsg\n");
14898 + return -EOPNOTSUPP;
14899 + }
14900 +
14901 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
14902 +
14903 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
14904 + return -EINVAL;
14905 +
14906 + __get_priv(netdev, &priv, &port_priv);
14907 +
14908 + if (!port_priv || !priv->vlans[vinfo->vid]) {
14909 + /* command targets switch device or this is a new VLAN */
14910 + err = ethsw_add_vlan(priv->netdev, vinfo->vid);
14911 + if (err)
14912 + return err;
14913 +
14914 +		/* command targets switch device; mark it */
14915 + if (!port_priv)
14916 + priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL;
14917 + }
14918 +
14919 + if (port_priv) {
14920 + /* command targets switch port */
14921 + err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags);
14922 + if (err)
14923 + return err;
14924 + }
14925 +
14926 + return 0;
14927 +}
14928 +
14929 +static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
14930 + [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
14931 + [IFLA_BRPORT_COST] = { .type = NLA_U32 },
14932 + [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
14933 + [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
14934 + [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
14935 + [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
14936 + [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
14937 + [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
14938 +};
14939 +
14940 +static int ethsw_set_learning(struct net_device *netdev, u8 flag)
14941 +{
14942 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14943 + enum dpsw_fdb_learning_mode learn_mode;
14944 + int err;
14945 +
14946 + if (flag)
14947 + learn_mode = DPSW_FDB_LEARNING_MODE_HW;
14948 + else
14949 + learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
14950 +
14951 + err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
14952 + 0, learn_mode);
14953 + if (err) {
14954 + netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
14955 + return err;
14956 + }
14957 + priv->learning = !!flag;
14958 +
14959 + return 0;
14960 +}
14961 +
14962 +static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
14963 +{
14964 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
14965 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
14966 + int err;
14967 +
14968 + err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
14969 + port_priv->port_index, (int)flag);
14970 + if (err) {
14971 +		netdev_err(netdev, "dpsw_if_set_flooding err %d\n", err);
14972 + return err;
14973 + }
14974 + priv->flood = !!flag;
14975 +
14976 + return 0;
14977 +}
14978 +
14979 +static int ethsw_port_set_state(struct net_device *netdev, u8 state)
14980 +{
14981 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
14982 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
14983 + u8 old_state = port_priv->stp_state;
14984 + int err;
14985 +
14986 + struct dpsw_stp_cfg stp_cfg = {
14987 + .vlan_id = 1,
14988 + .state = state,
14989 + };
14990 + /* TODO: check port state, interface may be down */
14991 +
14992 + if (state > BR_STATE_BLOCKING)
14993 + return -EINVAL;
14994 +
14995 + if (state == port_priv->stp_state)
14996 + return 0;
14997 +
14998 + if (state == BR_STATE_DISABLED) {
14999 + port_priv->stp_state = state;
15000 +
15001 + err = ethsw_port_stop(netdev);
15002 + if (err)
15003 + goto error;
15004 + } else {
15005 + err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
15006 + port_priv->port_index, &stp_cfg);
15007 + if (err) {
15008 + netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
15009 + return err;
15010 + }
15011 +
15012 + port_priv->stp_state = state;
15013 +
15014 + if (old_state == BR_STATE_DISABLED) {
15015 + err = ethsw_port_open(netdev);
15016 + if (err)
15017 + goto error;
15018 + }
15019 + }
15020 +
15021 + return 0;
15022 +error:
15023 + port_priv->stp_state = old_state;
15024 + return err;
15025 +}
15026 +
15027 +static int ethsw_setlink_protinfo(struct net_device *netdev,
15028 + struct nlattr **tb)
15029 +{
15030 + struct ethsw_dev_priv *priv;
15031 + struct ethsw_port_priv *port_priv = NULL;
15032 + int err = 0;
15033 +
15034 + __get_priv(netdev, &priv, &port_priv);
15035 +
15036 + if (tb[IFLA_BRPORT_LEARNING]) {
15037 + u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
15038 +
15039 + if (port_priv)
15040 + netdev_warn(netdev,
15041 + "learning set on whole switch dev\n");
15042 +
15043 + err = ethsw_set_learning(priv->netdev, flag);
15044 + if (err)
15045 + return err;
15046 +
15047 + } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
15048 + u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
15049 +
15050 + err = ethsw_port_set_flood(port_priv->netdev, flag);
15051 + if (err)
15052 + return err;
15053 +
15054 + } else if (tb[IFLA_BRPORT_STATE] && port_priv) {
15055 + u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
15056 +
15057 + err = ethsw_port_set_state(port_priv->netdev, state);
15058 + if (err)
15059 + return err;
15060 +
15061 + } else {
15062 + return -EOPNOTSUPP;
15063 + }
15064 +
15065 + return 0;
15066 +}
15067 +
15068 +static int ethsw_setlink(struct net_device *netdev,
15069 + struct nlmsghdr *nlh,
15070 + u16 flags)
15071 +{
15072 + struct nlattr *attr;
15073 + struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
15074 + IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
15075 + int err = 0;
15076 +
15077 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15078 + if (attr) {
15079 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
15080 + ifla_br_policy);
15081 + if (err) {
15082 + netdev_err(netdev,
15083 + "nla_parse_nested for br_policy err %d\n",
15084 + err);
15085 + return err;
15086 + }
15087 +
15088 + err = ethsw_setlink_af_spec(netdev, tb);
15089 + return err;
15090 + }
15091 +
15092 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
15093 + if (attr) {
15094 + err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr,
15095 + ifla_brport_policy);
15096 + if (err) {
15097 + netdev_err(netdev,
15098 + "nla_parse_nested for brport_policy err %d\n",
15099 + err);
15100 + return err;
15101 + }
15102 +
15103 + err = ethsw_setlink_protinfo(netdev, tb);
15104 + return err;
15105 + }
15106 +
15107 + netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n");
15108 + return -EOPNOTSUPP;
15109 +}
15110 +
15111 +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev,
15112 + struct ethsw_dev_priv *priv)
15113 +{
15114 + u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN;
15115 + int iflink;
15116 + int err;
15117 +
15118 + err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
15119 + if (err)
15120 + goto nla_put_err;
15121 + err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex);
15122 + if (err)
15123 + goto nla_put_err;
15124 + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
15125 + if (err)
15126 + goto nla_put_err;
15127 + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
15128 + if (err)
15129 + goto nla_put_err;
15130 + if (netdev->addr_len) {
15131 + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
15132 + netdev->dev_addr);
15133 + if (err)
15134 + goto nla_put_err;
15135 + }
15136 +
15137 + iflink = dev_get_iflink(netdev);
15138 + if (netdev->ifindex != iflink) {
15139 + err = nla_put_u32(skb, IFLA_LINK, iflink);
15140 + if (err)
15141 + goto nla_put_err;
15142 + }
15143 +
15144 + return 0;
15145 +
15146 +nla_put_err:
15147 + netdev_err(netdev, "nla_put_ err %d\n", err);
15148 + return err;
15149 +}
15150 +
15151 +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev,
15152 + struct ethsw_port_priv *port_priv)
15153 +{
15154 + struct nlattr *nest;
15155 + int err;
15156 +
15157 + u8 stp_state = port_priv->stp_state;
15158 +
15159 + if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING)
15160 + stp_state = BR_STATE_BLOCKING;
15161 +
15162 + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
15163 + if (!nest) {
15164 + netdev_err(netdev, "nla_nest_start failed\n");
15165 + return -ENOMEM;
15166 + }
15167 +
15168 + err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state);
15169 + if (err)
15170 + goto nla_put_err;
15171 + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
15172 + if (err)
15173 + goto nla_put_err;
15174 + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
15175 + if (err)
15176 + goto nla_put_err;
15177 + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
15178 + if (err)
15179 + goto nla_put_err;
15180 + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
15181 + if (err)
15182 + goto nla_put_err;
15183 + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
15184 + if (err)
15185 + goto nla_put_err;
15186 + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
15187 + if (err)
15188 + goto nla_put_err;
15189 + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING,
15190 + port_priv->ethsw_priv->learning);
15191 + if (err)
15192 + goto nla_put_err;
15193 + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
15194 + port_priv->ethsw_priv->flood);
15195 + if (err)
15196 + goto nla_put_err;
15197 + nla_nest_end(skb, nest);
15198 +
15199 + return 0;
15200 +
15201 +nla_put_err:
15202 + netdev_err(netdev, "nla_put_ err %d\n", err);
15203 + nla_nest_cancel(skb, nest);
15204 + return err;
15205 +}
15206 +
15207 +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev,
15208 + struct ethsw_dev_priv *priv,
15209 + struct ethsw_port_priv *port_priv)
15210 +{
15211 + struct nlattr *nest;
15212 + struct bridge_vlan_info vinfo;
15213 + const char *vlans;
15214 + u16 i;
15215 + int err;
15216 +
15217 + nest = nla_nest_start(skb, IFLA_AF_SPEC);
15218 + if (!nest) {
15219 + netdev_err(netdev, "nla_nest_start failed\n");
15220 + return -ENOMEM;
15221 + }
15222 +
15223 + if (port_priv)
15224 + vlans = port_priv->vlans;
15225 + else
15226 + vlans = priv->vlans;
15227 +
15228 + for (i = 0; i < VLAN_VID_MASK + 1; i++) {
15229 + vinfo.flags = 0;
15230 + vinfo.vid = i;
15231 +
15232 + if (vlans[i] & ETHSW_VLAN_UNTAGGED)
15233 + vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
15234 +
15235 + if (vlans[i] & ETHSW_VLAN_PVID)
15236 + vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
15237 +
15238 + if (vlans[i] & ETHSW_VLAN_MEMBER) {
15239 + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
15240 + sizeof(vinfo), &vinfo);
15241 + if (err)
15242 + goto nla_put_err;
15243 + }
15244 + }
15245 +
15246 + nla_nest_end(skb, nest);
15247 +
15248 + return 0;
15249 +nla_put_err:
15250 + netdev_err(netdev, "nla_put_ err %d\n", err);
15251 + nla_nest_cancel(skb, nest);
15252 + return err;
15253 +}
15254 +
15255 +static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15256 + struct net_device *netdev, u32 filter_mask,
15257 + int nlflags)
15258 +{
15259 + struct ethsw_dev_priv *priv;
15260 + struct ethsw_port_priv *port_priv = NULL;
15261 + struct ifinfomsg *hdr;
15262 + struct nlmsghdr *nlh;
15263 + int err;
15264 +
15265 + __get_priv(netdev, &priv, &port_priv);
15266 +
15267 + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
15268 + if (!nlh)
15269 + return -EMSGSIZE;
15270 +
15271 + hdr = nlmsg_data(nlh);
15272 + memset(hdr, 0, sizeof(*hdr));
15273 + hdr->ifi_family = AF_BRIDGE;
15274 + hdr->ifi_type = netdev->type;
15275 + hdr->ifi_index = netdev->ifindex;
15276 + hdr->ifi_flags = dev_get_flags(netdev);
15277 +
15278 + err = __nla_put_netdev(skb, netdev, priv);
15279 + if (err)
15280 + goto nla_put_err;
15281 +
15282 + if (port_priv) {
15283 + err = __nla_put_port(skb, netdev, port_priv);
15284 + if (err)
15285 + goto nla_put_err;
15286 + }
15287 +
15288 + /* Check if the VID information is requested */
15289 + if (filter_mask & RTEXT_FILTER_BRVLAN) {
15290 + err = __nla_put_vlan(skb, netdev, priv, port_priv);
15291 + if (err)
15292 + goto nla_put_err;
15293 + }
15294 +
15295 + nlmsg_end(skb, nlh);
15296 + return skb->len;
15297 +
15298 +nla_put_err:
15299 + nlmsg_cancel(skb, nlh);
15300 + return -EMSGSIZE;
15301 +}
15302 +
15303 +static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid)
15304 +{
15305 + struct list_head *pos;
15306 + struct ethsw_port_priv *ppriv_local = NULL;
15307 + int err = 0;
15308 +
15309 + if (!priv->vlans[vid])
15310 + return -ENOENT;
15311 +
15312 + err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid);
15313 + if (err) {
15314 + netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err);
15315 + return err;
15316 + }
15317 + priv->vlans[vid] = 0;
15318 +
15319 + list_for_each(pos, &priv->port_list) {
15320 + ppriv_local = list_entry(pos, struct ethsw_port_priv,
15321 + list);
15322 + ppriv_local->vlans[vid] = 0;
15323 + }
15324 +
15325 + return 0;
15326 +}
15327 +
15328 +static int ethsw_dellink_port(struct ethsw_dev_priv *priv,
15329 + struct ethsw_port_priv *port_priv,
15330 + u16 vid)
15331 +{
15332 + struct list_head *pos;
15333 + struct ethsw_port_priv *ppriv_local = NULL;
15334 + struct dpsw_vlan_if_cfg vcfg = {
15335 + .num_ifs = 1,
15336 + .if_id[0] = port_priv->port_index,
15337 + };
15338 + unsigned int count = 0;
15339 + int err = 0;
15340 +
15341 + if (!port_priv->vlans[vid])
15342 + return -ENOENT;
15343 +
15344 + /* VLAN will be deleted from switch if global flag is not set
15345 + * and is configured on only one port
15346 + */
15347 + if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) {
15348 + list_for_each(pos, &priv->port_list) {
15349 + ppriv_local = list_entry(pos, struct ethsw_port_priv,
15350 + list);
15351 + if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER)
15352 + count++;
15353 + }
15354 +
15355 + if (count == 1)
15356 + return ethsw_dellink_switch(priv, vid);
15357 + }
15358 +
15359 + err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle,
15360 + vid, &vcfg);
15361 + if (err) {
15362 + netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err);
15363 + return err;
15364 + }
15365 + port_priv->vlans[vid] = 0;
15366 + return 0;
15367 +}
15368 +
15369 +static int ethsw_dellink(struct net_device *netdev,
15370 + struct nlmsghdr *nlh,
15371 + u16 flags)
15372 +{
15373 + struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
15374 + struct nlattr *spec;
15375 + struct bridge_vlan_info *vinfo;
15376 + struct ethsw_dev_priv *priv;
15377 + struct ethsw_port_priv *port_priv = NULL;
15378 + int err = 0;
15379 +
15380 + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15381 + if (!spec)
15382 + return 0;
15383 +
15384 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
15385 + if (err)
15386 + return err;
15387 +
15388 + if (!tb[IFLA_BRIDGE_VLAN_INFO])
15389 + return -EOPNOTSUPP;
15390 +
15391 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
15392 +
15393 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
15394 + return -EINVAL;
15395 +
15396 + __get_priv(netdev, &priv, &port_priv);
15397 +
15398 + /* decide if command targets switch device or port */
15399 + if (!port_priv)
15400 + err = ethsw_dellink_switch(priv, vinfo->vid);
15401 + else
15402 + err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
15403 +
15404 + return err;
15405 +}
15406 +
15407 +static const struct net_device_ops ethsw_ops = {
15408 + .ndo_open = &ethsw_open,
15409 + .ndo_stop = &ethsw_stop,
15410 +
15411 + .ndo_bridge_setlink = &ethsw_setlink,
15412 + .ndo_bridge_getlink = &ethsw_getlink,
15413 + .ndo_bridge_dellink = &ethsw_dellink,
15414 +
15415 + .ndo_start_xmit = &ethsw_dropframe,
15416 +};
15417 +
15418 +/* -------------------------------------------------------------------------- */
15419 +/* switch port netdevice ops */
15420 +
15421 +static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
15422 +{
15423 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15424 + struct dpsw_link_state state;
15425 + int err;
15426 +
15427 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
15428 + port_priv->ethsw_priv->dpsw_handle,
15429 + port_priv->port_index, &state);
15430 + if (unlikely(err)) {
15431 + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
15432 + return err;
15433 + }
15434 +
15435 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
15436 +
15437 + if (state.up)
15438 + netif_carrier_on(port_priv->netdev);
15439 + else
15440 + netif_carrier_off(port_priv->netdev);
15441 +
15442 + return 0;
15443 +}
15444 +
15445 +static int ethsw_port_open(struct net_device *netdev)
15446 +{
15447 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15448 + int err;
15449 +
15450 + err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
15451 + port_priv->ethsw_priv->dpsw_handle,
15452 + port_priv->port_index);
15453 + if (err) {
15454 + netdev_err(netdev, "dpsw_if_enable err %d\n", err);
15455 + return err;
15456 + }
15457 +
15458 + /* sync carrier state */
15459 + err = _ethsw_port_carrier_state_sync(netdev);
15460 + if (err) {
15461 + netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n",
15462 + err);
15463 + goto err_carrier_sync;
15464 + }
15465 +
15466 + return 0;
15467 +
15468 +err_carrier_sync:
15469 + dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
15470 + port_priv->ethsw_priv->dpsw_handle,
15471 + port_priv->port_index);
15472 + return err;
15473 +}
15474 +
15475 +static int ethsw_port_stop(struct net_device *netdev)
15476 +{
15477 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15478 + int err;
15479 +
15480 + err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
15481 + port_priv->ethsw_priv->dpsw_handle,
15482 + port_priv->port_index);
15483 + if (err) {
15484 + netdev_err(netdev, "dpsw_if_disable err %d\n", err);
15485 + return err;
15486 + }
15487 +
15488 + return 0;
15489 +}
15490 +
15491 +static int ethsw_port_fdb_add_uc(struct net_device *netdev,
15492 + const unsigned char *addr)
15493 +{
15494 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15495 + struct dpsw_fdb_unicast_cfg entry = {0};
15496 + int err;
15497 +
15498 + entry.if_egress = port_priv->port_index;
15499 + entry.type = DPSW_FDB_ENTRY_STATIC;
15500 + ether_addr_copy(entry.mac_addr, addr);
15501 +
15502 + err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
15503 + port_priv->ethsw_priv->dpsw_handle,
15504 + 0, &entry);
15505 + if (err)
15506 + netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
15507 + return err;
15508 +}
15509 +
15510 +static int ethsw_port_fdb_del_uc(struct net_device *netdev,
15511 + const unsigned char *addr)
15512 +{
15513 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15514 + struct dpsw_fdb_unicast_cfg entry = {0};
15515 + int err;
15516 +
15517 + entry.if_egress = port_priv->port_index;
15518 + entry.type = DPSW_FDB_ENTRY_STATIC;
15519 + ether_addr_copy(entry.mac_addr, addr);
15520 +
15521 + err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0,
15522 + port_priv->ethsw_priv->dpsw_handle,
15523 + 0, &entry);
15524 + if (err)
15525 + netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err);
15526 + return err;
15527 +}
15528 +
15529 +static int ethsw_port_fdb_add_mc(struct net_device *netdev,
15530 + const unsigned char *addr)
15531 +{
15532 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15533 + struct dpsw_fdb_multicast_cfg entry = {0};
15534 + int err;
15535 +
15536 + ether_addr_copy(entry.mac_addr, addr);
15537 + entry.type = DPSW_FDB_ENTRY_STATIC;
15538 + entry.num_ifs = 1;
15539 + entry.if_id[0] = port_priv->port_index;
15540 +
15541 + err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0,
15542 + port_priv->ethsw_priv->dpsw_handle,
15543 + 0, &entry);
15544 + if (err)
15545 + netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err);
15546 + return err;
15547 +}
15548 +
15549 +static int ethsw_port_fdb_del_mc(struct net_device *netdev,
15550 + const unsigned char *addr)
15551 +{
15552 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15553 + struct dpsw_fdb_multicast_cfg entry = {0};
15554 + int err;
15555 +
15556 + ether_addr_copy(entry.mac_addr, addr);
15557 + entry.type = DPSW_FDB_ENTRY_STATIC;
15558 + entry.num_ifs = 1;
15559 + entry.if_id[0] = port_priv->port_index;
15560 +
15561 + err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0,
15562 + port_priv->ethsw_priv->dpsw_handle,
15563 + 0, &entry);
15564 + if (err)
15565 + netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err);
15566 + return err;
15567 +}
15568 +
15569 +static int _lookup_address(struct net_device *netdev, int is_uc,
15570 + const unsigned char *addr)
15571 +{
15572 + struct netdev_hw_addr *ha;
15573 + struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
15574 +
15575 + netif_addr_lock_bh(netdev);
15576 + list_for_each_entry(ha, &list->list, list) {
15577 + if (ether_addr_equal(ha->addr, addr)) {
15578 + netif_addr_unlock_bh(netdev);
15579 + return 1;
15580 + }
15581 + }
15582 + netif_addr_unlock_bh(netdev);
15583 + return 0;
15584 +}
15585 +
15586 +static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
15587 + struct net_device *netdev,
15588 + const unsigned char *addr, u16 vid,
15589 + u16 flags)
15590 +{
15591 + struct list_head *pos;
15592 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15593 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15594 + int err;
15595 +
15596 + /* TODO: add replace support when added to iproute bridge */
15597 + if (!(flags & NLM_F_REQUEST)) {
15598 + netdev_err(netdev,
15599 + "ethsw_port_fdb_add unexpected flags value %08x\n",
15600 + flags);
15601 + return -EINVAL;
15602 + }
15603 +
15604 + if (is_unicast_ether_addr(addr)) {
15605 + /* if entry cannot be replaced, return error if exists */
15606 + if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) {
15607 + list_for_each(pos, &priv->port_list) {
15608 + port_priv = list_entry(pos,
15609 + struct ethsw_port_priv,
15610 + list);
15611 + if (_lookup_address(port_priv->netdev,
15612 + 1, addr))
15613 + return -EEXIST;
15614 + }
15615 + }
15616 +
15617 + err = ethsw_port_fdb_add_uc(netdev, addr);
15618 + if (err) {
15619 + netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n",
15620 + err);
15621 + return err;
15622 + }
15623 +
15624 + /* we might have replaced an existing entry for a different
15625 + * switch port, make sure the address doesn't linger in any
15626 + * port address list
15627 + */
15628 + list_for_each(pos, &priv->port_list) {
15629 + port_priv = list_entry(pos, struct ethsw_port_priv,
15630 + list);
15631 + dev_uc_del(port_priv->netdev, addr);
15632 + }
15633 +
15634 + err = dev_uc_add(netdev, addr);
15635 + if (err) {
15636 + netdev_err(netdev, "dev_uc_add err %d\n", err);
15637 + return err;
15638 + }
15639 + } else {
15640 + struct dpsw_fdb_multicast_cfg entry = {
15641 + .type = DPSW_FDB_ENTRY_STATIC,
15642 + .num_ifs = 0,
15643 + };
15644 +
15645 + /* check if address is already set on this port */
15646 + if (_lookup_address(netdev, 0, addr))
15647 + return -EEXIST;
15648 +
15649 + /* check if the address exists on other port */
15650 + ether_addr_copy(entry.mac_addr, addr);
15651 + err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
15652 + 0, &entry);
15653 + if (!err) {
15654 + /* entry exists, can we replace it? */
15655 + if (flags & NLM_F_EXCL)
15656 + return -EEXIST;
15657 + } else if (err != -ENAVAIL) {
15658 + netdev_err(netdev, "dpsw_fdb_get_multicast err %d\n",
15659 + err);
15660 + return err;
15661 + }
15662 +
15663 + err = ethsw_port_fdb_add_mc(netdev, addr);
15664 + if (err) {
15665 + netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
15666 + err);
15667 + return err;
15668 + }
15669 +
15670 + err = dev_mc_add(netdev, addr);
15671 + if (err) {
15672 + netdev_err(netdev, "dev_mc_add err %d\n", err);
15673 + return err;
15674 + }
15675 + }
15676 +
15677 + return 0;
15678 +}
15679 +
15680 +static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
15681 + struct net_device *netdev,
15682 + const unsigned char *addr, u16 vid)
15683 +{
15684 + int err;
15685 +
15686 + if (is_unicast_ether_addr(addr)) {
15687 + err = ethsw_port_fdb_del_uc(netdev, addr);
15688 + if (err) {
15689 + netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
15690 + err);
15691 + return err;
15692 + }
15693 +
15694 + /* also delete if configured on port */
15695 + err = dev_uc_del(netdev, addr);
15696 + if (err && err != -ENOENT) {
15697 + netdev_err(netdev, "dev_uc_del err %d\n", err);
15698 + return err;
15699 + }
15700 + } else {
15701 + if (!_lookup_address(netdev, 0, addr))
15702 + return -ENOENT;
15703 +
15704 + err = dev_mc_del(netdev, addr);
15705 + if (err) {
15706 + netdev_err(netdev, "dev_mc_del err %d\n", err);
15707 + return err;
15708 + }
15709 +
15710 + err = ethsw_port_fdb_del_mc(netdev, addr);
15711 + if (err) {
15712 + netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
15713 + err);
15714 + return err;
15715 + }
15716 + }
15717 +
15718 + return 0;
15719 +}
15720 +
15721 +void ethsw_port_get_stats(struct net_device *netdev,
15722 + struct rtnl_link_stats64 *storage)
15723 +{
15724 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15725 + u64 tmp;
15726 + int err;
15727 +
15728 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15729 + port_priv->ethsw_priv->dpsw_handle,
15730 + port_priv->port_index,
15731 + DPSW_CNT_ING_FRAME, &storage->rx_packets);
15732 + if (err)
15733 + goto error;
15734 +
15735 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15736 + port_priv->ethsw_priv->dpsw_handle,
15737 + port_priv->port_index,
15738 + DPSW_CNT_EGR_FRAME, &storage->tx_packets);
15739 + if (err)
15740 + goto error;
15741 +
15742 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15743 + port_priv->ethsw_priv->dpsw_handle,
15744 + port_priv->port_index,
15745 + DPSW_CNT_ING_BYTE, &storage->rx_bytes);
15746 + if (err)
15747 + goto error;
15748 +
15749 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15750 + port_priv->ethsw_priv->dpsw_handle,
15751 + port_priv->port_index,
15752 + DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
15753 + if (err)
15754 + goto error;
15755 +
15756 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15757 + port_priv->ethsw_priv->dpsw_handle,
15758 + port_priv->port_index,
15759 + DPSW_CNT_ING_FRAME_DISCARD,
15760 + &storage->rx_dropped);
15761 + if (err)
15762 + goto error;
15763 +
15764 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15765 + port_priv->ethsw_priv->dpsw_handle,
15766 + port_priv->port_index,
15767 + DPSW_CNT_ING_FLTR_FRAME,
15768 + &tmp);
15769 + if (err)
15770 + goto error;
15771 + storage->rx_dropped += tmp;
15772 +
15773 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15774 + port_priv->ethsw_priv->dpsw_handle,
15775 + port_priv->port_index,
15776 + DPSW_CNT_EGR_FRAME_DISCARD,
15777 + &storage->tx_dropped);
15778 + if (err)
15779 + goto error;
15780 +
15781 + return;
15782 +
15783 +error:
15784 + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
15785 +}
15786 +
15787 +static int ethsw_port_change_mtu(struct net_device *netdev, int mtu)
15788 +{
15789 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15790 + int err;
15791 +
15792 + if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) {
15793 + netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
15794 + mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH);
15795 + return -EINVAL;
15796 + }
15797 +
15798 + err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io,
15799 + 0,
15800 + port_priv->ethsw_priv->dpsw_handle,
15801 + port_priv->port_index,
15802 + (u16)ETHSW_L2_MAX_FRM(mtu));
15803 + if (err) {
15804 + netdev_err(netdev,
15805 + "dpsw_if_set_max_frame_length() err %d\n", err);
15806 + return err;
15807 + }
15808 +
15809 + netdev->mtu = mtu;
15810 + return 0;
15811 +}
15812 +
15813 +static const struct net_device_ops ethsw_port_ops = {
15814 + .ndo_open = &ethsw_port_open,
15815 + .ndo_stop = &ethsw_port_stop,
15816 +
15817 + .ndo_fdb_add = &ethsw_port_fdb_add,
15818 + .ndo_fdb_del = &ethsw_port_fdb_del,
15819 + .ndo_fdb_dump = &ndo_dflt_fdb_dump,
15820 +
15821 + .ndo_get_stats64 = &ethsw_port_get_stats,
15822 + .ndo_change_mtu = &ethsw_port_change_mtu,
15823 +
15824 + .ndo_start_xmit = &ethsw_dropframe,
15825 +};
15826 +
15827 +static void ethsw_get_drvinfo(struct net_device *netdev,
15828 + struct ethtool_drvinfo *drvinfo)
15829 +{
15830 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15831 + u16 version_major, version_minor;
15832 + int err;
15833 +
15834 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
15835 + strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version));
15836 +
15837 + err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0,
15838 + &version_major,
15839 + &version_minor);
15840 + if (err)
15841 + strlcpy(drvinfo->fw_version, "N/A",
15842 + sizeof(drvinfo->fw_version));
15843 + else
15844 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
15845 + "%u.%u", version_major, version_minor);
15846 +
15847 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
15848 + sizeof(drvinfo->bus_info));
15849 +}
15850 +
15851 +static int ethsw_get_settings(struct net_device *netdev,
15852 + struct ethtool_cmd *cmd)
15853 +{
15854 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15855 + struct dpsw_link_state state = {0};
15856 + int err = 0;
15857 +
15858 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
15859 + port_priv->ethsw_priv->dpsw_handle,
15860 + port_priv->port_index,
15861 + &state);
15862 + if (err) {
15863 + netdev_err(netdev, "ERROR %d getting link state", err);
15864 + goto out;
15865 + }
15866 +
15867 + /* At the moment, we have no way of interrogating the DPMAC
15868 + * from the DPSW side or there may not exist a DPMAC at all.
15869 + * Report only autoneg state, duplexity and speed.
15870 + */
15871 + if (state.options & DPSW_LINK_OPT_AUTONEG)
15872 + cmd->autoneg = AUTONEG_ENABLE;
15873 + if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
15874 + cmd->duplex = DUPLEX_FULL;
15875 + ethtool_cmd_speed_set(cmd, state.rate);
15876 +
15877 +out:
15878 + return err;
15879 +}
15880 +
15881 +static int ethsw_set_settings(struct net_device *netdev,
15882 + struct ethtool_cmd *cmd)
15883 +{
15884 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15885 + struct dpsw_link_state state = {0};
15886 + struct dpsw_link_cfg cfg = {0};
15887 + int err = 0;
15888 +
15889 + netdev_dbg(netdev, "Setting link parameters...");
15890 +
15891 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
15892 + port_priv->ethsw_priv->dpsw_handle,
15893 + port_priv->port_index,
15894 + &state);
15895 + if (err) {
15896 + netdev_err(netdev, "ERROR %d getting link state", err);
15897 + goto out;
15898 + }
15899 +
15900 + /* Due to a temporary MC limitation, the DPSW port must be down
15901 + * before its link settings can be changed. Let the user know
15902 + * when that is not the case.
15903 + */
15904 + if (netif_running(netdev)) {
15905 + netdev_info(netdev,
15906 + "Sorry, interface must be brought down first.\n");
15907 + return -EACCES;
15908 + }
15909 +
15910 + cfg.options = state.options;
15911 + cfg.rate = ethtool_cmd_speed(cmd);
15912 + if (cmd->autoneg == AUTONEG_ENABLE)
15913 + cfg.options |= DPSW_LINK_OPT_AUTONEG;
15914 + else
15915 + cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
15916 + if (cmd->duplex == DUPLEX_HALF)
15917 + cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
15918 + else
15919 + cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
15920 +
15921 + err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0,
15922 + port_priv->ethsw_priv->dpsw_handle,
15923 + port_priv->port_index,
15924 + &cfg);
15925 + if (err)
15926 + /* ethtool will be loud enough if we return an error; no point
15927 + * in putting our own error message on the console by default
15928 + */
15929 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
15930 +
15931 +out:
15932 + return err;
15933 +}
15934 +
15935 +static struct {
15936 + enum dpsw_counter id;
15937 + char name[ETH_GSTRING_LEN];
15938 +} ethsw_ethtool_counters[] = {
15939 + {DPSW_CNT_ING_FRAME, "rx frames"},
15940 + {DPSW_CNT_ING_BYTE, "rx bytes"},
15941 + {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
15942 + {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
15943 + {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
15944 + {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
15945 + {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
15946 + {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
15947 + {DPSW_CNT_EGR_FRAME, "tx frames"},
15948 + {DPSW_CNT_EGR_BYTE, "tx bytes"},
15949 + {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
15950 +
15951 +};
15952 +
15953 +static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
15954 +{
15955 + switch (sset) {
15956 + case ETH_SS_STATS:
15957 + return ARRAY_SIZE(ethsw_ethtool_counters);
15958 + default:
15959 + return -EOPNOTSUPP;
15960 + }
15961 +}
15962 +
15963 +static void ethsw_ethtool_get_strings(struct net_device *netdev,
15964 + u32 stringset, u8 *data)
15965 +{
15966 + u32 i;
15967 +
15968 + switch (stringset) {
15969 + case ETH_SS_STATS:
15970 + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
15971 + memcpy(data + i * ETH_GSTRING_LEN,
15972 + ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
15973 + break;
15974 + }
15975 +}
15976 +
15977 +static void ethsw_ethtool_get_stats(struct net_device *netdev,
15978 + struct ethtool_stats *stats,
15979 + u64 *data)
15980 +{
15981 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15982 + u32 i;
15983 + int err;
15984 +
15985 + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
15986 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15987 + port_priv->ethsw_priv->dpsw_handle,
15988 + port_priv->port_index,
15989 + ethsw_ethtool_counters[i].id,
15990 + &data[i]);
15991 + if (err)
15992 + netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
15993 + ethsw_ethtool_counters[i].name, err);
15994 + }
15995 +}
15996 +
15997 +static const struct ethtool_ops ethsw_port_ethtool_ops = {
15998 + .get_drvinfo = &ethsw_get_drvinfo,
15999 + .get_link = &ethtool_op_get_link,
16000 + .get_settings = &ethsw_get_settings,
16001 + .set_settings = &ethsw_set_settings,
16002 + .get_strings = &ethsw_ethtool_get_strings,
16003 + .get_ethtool_stats = &ethsw_ethtool_get_stats,
16004 + .get_sset_count = &ethsw_ethtool_get_sset_count,
16005 +};
16006 +
16007 +/* -------------------------------------------------------------------------- */
16008 +/* ethsw driver functions */
16009 +
16010 +static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
16011 +{
16012 + struct list_head *pos;
16013 + struct ethsw_port_priv *port_priv;
16014 + int err;
16015 +
16016 + list_for_each(pos, &priv->port_list) {
16017 + port_priv = list_entry(pos, struct ethsw_port_priv,
16018 + list);
16019 +
16020 + err = _ethsw_port_carrier_state_sync(port_priv->netdev);
16021 + if (err)
16022 + netdev_err(port_priv->netdev,
16023 + "_ethsw_port_carrier_state_sync err %d\n",
16024 + err);
16025 + }
16026 +
16027 + return 0;
16028 +}
16029 +
16030 +static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
16031 +{
16032 + return IRQ_WAKE_THREAD;
16033 +}
16034 +
16035 +static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
16036 +{
16037 + struct device *dev = (struct device *)arg;
16038 + struct net_device *netdev = dev_get_drvdata(dev);
16039 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16040 +
16041 + struct fsl_mc_io *io = priv->mc_io;
16042 + u16 token = priv->dpsw_handle;
16043 + int irq_index = DPSW_IRQ_INDEX_IF;
16044 +
16045 + /* Mask the events and the if_id reserved bits to be cleared on read */
16046 + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
16047 + int err;
16048 +
16049 + err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
16050 + if (unlikely(err)) {
16051 + netdev_err(netdev, "Can't get irq status (err %d)", err);
16052 +
16053 + err = dpsw_clear_irq_status(io, 0, token, irq_index,
16054 + 0xFFFFFFFF);
16055 + if (unlikely(err))
16056 + netdev_err(netdev, "Can't clear irq status (err %d)",
16057 + err);
16058 + goto out;
16059 + }
16060 +
16061 + if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
16062 + err = ethsw_links_state_update(priv);
16063 + if (unlikely(err))
16064 + goto out;
16065 + }
16066 +
16067 +out:
16068 + return IRQ_HANDLED;
16069 +}
16070 +
16071 +static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
16072 +{
16073 + struct device *dev = &sw_dev->dev;
16074 + struct net_device *netdev = dev_get_drvdata(dev);
16075 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16076 + int err = 0;
16077 + struct fsl_mc_device_irq *irq;
16078 + const int irq_index = DPSW_IRQ_INDEX_IF;
16079 + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
16080 +
16081 + err = fsl_mc_allocate_irqs(sw_dev);
16082 + if (unlikely(err)) {
16083 + dev_err(dev, "MC irqs allocation failed\n");
16084 + return err;
16085 + }
16086 +
16087 + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) {
16088 + err = -EINVAL;
16089 + goto free_irq;
16090 + }
16091 +
16092 + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16093 + irq_index, 0);
16094 + if (unlikely(err)) {
16095 + dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
16096 + goto free_irq;
16097 + }
16098 +
16099 + irq = sw_dev->irqs[irq_index];
16100 +
16101 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
16102 + ethsw_irq0_handler,
16103 + _ethsw_irq0_handler_thread,
16104 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
16105 + dev_name(dev), dev);
16106 + if (unlikely(err)) {
16107 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
16108 + goto free_irq;
16109 + }
16110 +
16111 + err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle,
16112 + irq_index, mask);
16113 + if (unlikely(err)) {
16114 + dev_err(dev, "dpsw_set_irq_mask(): %d", err);
16115 + goto free_devm_irq;
16116 + }
16117 +
16118 + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16119 + irq_index, 1);
16120 + if (unlikely(err)) {
16121 + dev_err(dev, "dpsw_set_irq_enable(): %d", err);
16122 + goto free_devm_irq;
16123 + }
16124 +
16125 + return 0;
16126 +
16127 +free_devm_irq:
16128 + devm_free_irq(dev, irq->msi_desc->irq, dev);
16129 +free_irq:
16130 + fsl_mc_free_irqs(sw_dev);
16131 + return err;
16132 +}
16133 +
16134 +static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
16135 +{
16136 + struct device *dev = &sw_dev->dev;
16137 + struct net_device *netdev = dev_get_drvdata(dev);
16138 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16139 +
16140 + dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16141 + DPSW_IRQ_INDEX_IF, 0);
16142 + devm_free_irq(dev,
16143 + sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq,
16144 + dev);
16145 + fsl_mc_free_irqs(sw_dev);
16146 +}
16147 +
16148 +static int __cold
16149 +ethsw_init(struct fsl_mc_device *sw_dev)
16150 +{
16151 + struct device *dev = &sw_dev->dev;
16152 + struct ethsw_dev_priv *priv;
16153 + struct net_device *netdev;
16154 + int err = 0;
16155 + u16 i;
16156 + u16 version_major, version_minor;
16157 + const struct dpsw_stp_cfg stp_cfg = {
16158 + .vlan_id = 1,
16159 + .state = DPSW_STP_STATE_FORWARDING,
16160 + };
16161 +
16162 + netdev = dev_get_drvdata(dev);
16163 + priv = netdev_priv(netdev);
16164 +
16165 + priv->dev_id = sw_dev->obj_desc.id;
16166 +
16167 + err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle);
16168 + if (err) {
16169 + dev_err(dev, "dpsw_open err %d\n", err);
16170 + goto err_exit;
16171 + }
16172 + if (!priv->dpsw_handle) {
16173 + dev_err(dev, "dpsw_open returned null handle but no error\n");
16174 + err = -EFAULT;
16175 + goto err_exit;
16176 + }
16177 +
16178 + err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
16179 + &priv->sw_attr);
16180 + if (err) {
16181 + dev_err(dev, "dpsw_get_attributes err %d\n", err);
16182 + goto err_close;
16183 + }
16184 +
16185 + err = dpsw_get_api_version(priv->mc_io, 0,
16186 + &version_major,
16187 + &version_minor);
16188 + if (err) {
16189 + dev_err(dev, "dpsw_get_api_version err %d\n", err);
16190 + goto err_close;
16191 + }
16192 +
16193 + /* Minimum supported DPSW version check */
16194 + if (version_major < DPSW_MIN_VER_MAJOR ||
16195 + (version_major == DPSW_MIN_VER_MAJOR &&
16196 + version_minor < DPSW_MIN_VER_MINOR)) {
16197 + dev_err(dev, "DPSW version %d.%d not supported. Use %d.%d or greater.\n",
16198 + version_major,
16199 + version_minor,
16200 + DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
16201 + err = -ENOTSUPP;
16202 + goto err_close;
16203 + }
16204 +
16205 + err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
16206 + if (err) {
16207 + dev_err(dev, "dpsw_reset err %d\n", err);
16208 + goto err_close;
16209 + }
16210 +
16211 + err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
16212 + DPSW_FDB_LEARNING_MODE_HW);
16213 + if (err) {
16214 + dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
16215 + goto err_close;
16216 + }
16217 +
16218 + for (i = 0; i < priv->sw_attr.num_ifs; i++) {
16219 + err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
16220 + &stp_cfg);
16221 + if (err) {
16222 + dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
16223 + err, i);
16224 + goto err_close;
16225 + }
16226 +
16227 + err = dpsw_if_set_broadcast(priv->mc_io, 0,
16228 + priv->dpsw_handle, i, 1);
16229 + if (err) {
16230 + dev_err(dev,
16231 + "dpsw_if_set_broadcast err %d for port %d\n",
16232 + err, i);
16233 + goto err_close;
16234 + }
16235 + }
16236 +
16237 + return 0;
16238 +
16239 +err_close:
16240 + dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
16241 +err_exit:
16242 + return err;
16243 +}
16244 +
16245 +static int __cold
16246 +ethsw_takedown(struct fsl_mc_device *sw_dev)
16247 +{
16248 + struct device *dev = &sw_dev->dev;
16249 + struct net_device *netdev;
16250 + struct ethsw_dev_priv *priv;
16251 + int err;
16252 +
16253 + netdev = dev_get_drvdata(dev);
16254 + priv = netdev_priv(netdev);
16255 +
16256 + err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
16257 + if (err)
16258 + dev_warn(dev, "dpsw_close err %d\n", err);
16259 +
16260 + return 0;
16261 +}
16262 +
16263 +static int __cold
16264 +ethsw_remove(struct fsl_mc_device *sw_dev)
16265 +{
16266 + struct device *dev;
16267 + struct net_device *netdev;
16268 + struct ethsw_dev_priv *priv;
16269 + struct ethsw_port_priv *port_priv;
16270 + struct list_head *pos;
16271 +
16272 + dev = &sw_dev->dev;
16273 + netdev = dev_get_drvdata(dev);
16274 + priv = netdev_priv(netdev);
16275 +
16276 + list_for_each(pos, &priv->port_list) {
16277 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
16278 +
16279 + rtnl_lock();
16280 + netdev_upper_dev_unlink(port_priv->netdev, netdev);
16281 + rtnl_unlock();
16282 +
16283 + unregister_netdev(port_priv->netdev);
16284 + free_netdev(port_priv->netdev);
16285 + }
16286 +
16287 + ethsw_teardown_irqs(sw_dev);
16288 +
16289 + unregister_netdev(netdev);
16290 +
16291 + ethsw_takedown(sw_dev);
16292 + fsl_mc_portal_free(priv->mc_io);
16293 +
16294 + dev_set_drvdata(dev, NULL);
16295 + free_netdev(netdev);
16296 +
16297 + return 0;
16298 +}
16299 +
16300 +static int __cold
16301 +ethsw_probe(struct fsl_mc_device *sw_dev)
16302 +{
16303 + struct device *dev;
16304 + struct net_device *netdev = NULL;
16305 + struct ethsw_dev_priv *priv = NULL;
16306 + int err = 0;
16307 + u16 i;
16308 + const char def_mcast[ETH_ALEN] = {
16309 + 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
16310 + };
16311 + char port_name[IFNAMSIZ];
16312 +
16313 + dev = &sw_dev->dev;
16314 +
16315 + /* register switch device, it's for management only - no I/O */
16316 + netdev = alloc_etherdev(sizeof(*priv));
16317 + if (!netdev) {
16318 + dev_err(dev, "alloc_etherdev error\n");
16319 + return -ENOMEM;
16320 + }
16321 + netdev->netdev_ops = &ethsw_ops;
16322 +
16323 + SET_NETDEV_DEV(netdev, dev);
16324 + dev_set_drvdata(dev, netdev);
16325 +
16326 + priv = netdev_priv(netdev);
16327 + priv->netdev = netdev;
16328 +
16329 + err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
16330 + if (err) {
16331 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
16332 + goto err_free_netdev;
16333 + }
16334 + if (!priv->mc_io) {
16335 + dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
16336 + err = -EFAULT;
16337 + goto err_free_netdev;
16338 + }
16339 +
16340 + err = ethsw_init(sw_dev);
16341 + if (err) {
16342 + dev_err(dev, "switch init err %d\n", err);
16343 + goto err_free_cmdport;
16344 + }
16345 +
16346 + netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
16347 +
16348 + /* TODO: should we hold rtnl_lock here? We can't register_netdev under
16349 + * lock
16350 + */
16351 + dev_alloc_name(netdev, "sw%d");
16352 + err = register_netdev(netdev);
16353 + if (err < 0) {
16354 + dev_err(dev, "register_netdev error %d\n", err);
16355 + goto err_takedown;
16356 + }
16357 + if (err)
16358 + dev_info(dev, "register_netdev res %d\n", err);
16359 +
16360 + /* VLAN 1 is implicitly configured on the switch */
16361 + priv->vlans[1] = ETHSW_VLAN_MEMBER;
16362 + /* Flooding and learning are implicitly enabled */
16363 + priv->learning = true;
16364 + priv->flood = true;
16365 +
16366 + /* register switch ports */
16367 + snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
16368 +
16369 + INIT_LIST_HEAD(&priv->port_list);
16370 + for (i = 0; i < priv->sw_attr.num_ifs; i++) {
16371 + struct net_device *port_netdev;
16372 + struct ethsw_port_priv *port_priv;
16373 +
16374 + port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
16375 + if (!port_netdev) {
16376 + dev_err(dev, "alloc_etherdev error\n");
16377 + goto err_takedown;
16378 + }
16379 +
16380 + port_priv = netdev_priv(port_netdev);
16381 + port_priv->netdev = port_netdev;
16382 + port_priv->ethsw_priv = priv;
16383 +
16384 + port_priv->port_index = i;
16385 + port_priv->stp_state = BR_STATE_FORWARDING;
16386 + /* VLAN 1 is configured by default on all switch ports */
16387 + port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
16388 + ETHSW_VLAN_PVID;
16389 +
16390 + SET_NETDEV_DEV(port_netdev, dev);
16391 + port_netdev->netdev_ops = &ethsw_port_ops;
16392 + port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
16393 +
16394 + port_netdev->flags = port_netdev->flags |
16395 + IFF_PROMISC | IFF_SLAVE;
16396 +
16397 + dev_alloc_name(port_netdev, port_name);
16398 + err = register_netdev(port_netdev);
16399 + if (err < 0) {
16400 + dev_err(dev, "register_netdev error %d\n", err);
16401 + free_netdev(port_netdev);
16402 + goto err_takedown;
16403 + }
16404 +
16405 + rtnl_lock();
16406 +
16407 + err = netdev_master_upper_dev_link(port_netdev, netdev,
16408 + NULL, NULL);
16409 + if (err) {
16410 + dev_err(dev, "netdev_master_upper_dev_link error %d\n",
16411 + err);
16412 + unregister_netdev(port_netdev);
16413 + free_netdev(port_netdev);
16414 + rtnl_unlock();
16415 + goto err_takedown;
16416 + }
16417 +
16418 + rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
16419 +
16420 + rtnl_unlock();
16421 +
16422 + list_add(&port_priv->list, &priv->port_list);
16423 +
16424 + /* TODO: implement set_rm_mode instead of this */
16425 + err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
16426 + if (err)
16427 + dev_warn(&netdev->dev,
16428 + "ethsw_port_fdb_add_mc err %d\n", err);
16429 + }
16430 +
16431 + /* the switch starts up enabled */
16432 + rtnl_lock();
16433 + err = dev_open(netdev);
16434 + rtnl_unlock();
16435 + if (err)
16436 + dev_warn(dev, "dev_open err %d\n", err);
16437 +
16438 + /* setup irqs */
16439 + err = ethsw_setup_irqs(sw_dev);
16440 + if (unlikely(err)) {
16441 + dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
16442 + goto err_takedown;
16443 + }
16444 +
16445 + dev_info(&netdev->dev,
16446 + "probed %d port switch\n", priv->sw_attr.num_ifs);
16447 + return 0;
16448 +
16449 +err_takedown:
16450 + ethsw_remove(sw_dev);
16451 +err_free_cmdport:
16452 + fsl_mc_portal_free(priv->mc_io);
16453 +err_free_netdev:
16454 + dev_set_drvdata(dev, NULL);
16455 + free_netdev(netdev);
16456 +
16457 + return err;
16458 +}
16459 +
16460 +static const struct fsl_mc_device_id ethsw_match_id_table[] = {
16461 + {
16462 + .vendor = FSL_MC_VENDOR_FREESCALE,
16463 + .obj_type = "dpsw",
16464 + },
16465 + {}
16466 +};
16467 +
16468 +static struct fsl_mc_driver eth_sw_drv = {
16469 + .driver = {
16470 + .name = KBUILD_MODNAME,
16471 + .owner = THIS_MODULE,
16472 + },
16473 + .probe = ethsw_probe,
16474 + .remove = ethsw_remove,
16475 + .match_id_table = ethsw_match_id_table,
16476 +};
16477 +
16478 +module_fsl_mc_driver(eth_sw_drv);
16479 +
16480 +MODULE_LICENSE("GPL");
16481 +MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)");
16482 --- /dev/null
16483 +++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
16484 @@ -0,0 +1,7 @@
16485 +config FSL_DPAA2_EVB
16486 + tristate "DPAA2 Edge Virtual Bridge"
16487 + depends on FSL_MC_BUS && FSL_DPAA2
16488 + select VLAN_8021Q
16489 + default y
16490 + ---help---
16491 + Prototype driver for DPAA2 Edge Virtual Bridge.
16492 --- /dev/null
16493 +++ b/drivers/staging/fsl-dpaa2/evb/Makefile
16494 @@ -0,0 +1,10 @@
16495 +
16496 +obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o
16497 +
16498 +dpaa2-evb-objs := evb.o dpdmux.o
16499 +
16500 +all:
16501 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
16502 +
16503 +clean:
16504 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
16505 --- /dev/null
16506 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
16507 @@ -0,0 +1,279 @@
16508 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
16509 + *
16510 + * Redistribution and use in source and binary forms, with or without
16511 + * modification, are permitted provided that the following conditions are met:
16512 + * * Redistributions of source code must retain the above copyright
16513 + * notice, this list of conditions and the following disclaimer.
16514 + * * Redistributions in binary form must reproduce the above copyright
16515 + * notice, this list of conditions and the following disclaimer in the
16516 + * documentation and/or other materials provided with the distribution.
16517 + * * Neither the name of the above-listed copyright holders nor the
16518 + * names of any contributors may be used to endorse or promote products
16519 + * derived from this software without specific prior written permission.
16520 + *
16521 + *
16522 + * ALTERNATIVELY, this software may be distributed under the terms of the
16523 + * GNU General Public License ("GPL") as published by the Free Software
16524 + * Foundation, either version 2 of that License or (at your option) any
16525 + * later version.
16526 + *
16527 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16528 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16529 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16530 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
16531 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16532 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16533 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
16534 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
16535 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
16536 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
16537 + * POSSIBILITY OF SUCH DAMAGE.
16538 + */
16539 +#ifndef _FSL_DPDMUX_CMD_H
16540 +#define _FSL_DPDMUX_CMD_H
16541 +
16542 +/* DPDMUX Version */
16543 +#define DPDMUX_VER_MAJOR 6
16544 +#define DPDMUX_VER_MINOR 1
16545 +
16546 +#define DPDMUX_CMD_BASE_VER 1
16547 +#define DPDMUX_CMD_ID_OFFSET 4
16548 +
16549 +#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER)
16550 +
16551 +/* Command IDs */
16552 +#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800)
16553 +#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806)
16554 +#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906)
16555 +#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986)
16556 +#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06)
16557 +
16558 +#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
16559 +#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
16560 +#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004)
16561 +#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
16562 +#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
16563 +
16564 +#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012)
16565 +#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013)
16566 +#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014)
16567 +#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015)
16568 +#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016)
16569 +#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017)
16570 +
16571 +#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
16572 +
16573 +#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3)
16574 +
16575 +#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7)
16576 +#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8)
16577 +#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9)
16578 +#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa)
16579 +
16580 +#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0)
16581 +#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1)
16582 +#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2)
16583 +#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3)
16584 +#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4)
16585 +
16586 +#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5)
16587 +#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6)
16588 +#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7)
16589 +
16590 +#define DPDMUX_MASK(field) \
16591 + GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \
16592 + DPDMUX_##field##_SHIFT)
16593 +#define dpdmux_set_field(var, field, val) \
16594 + ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field)))
16595 +#define dpdmux_get_field(var, field) \
16596 + (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT)
16597 +
16598 +struct dpdmux_cmd_open {
16599 + u32 dpdmux_id;
16600 +};
16601 +
16602 +struct dpdmux_cmd_create {
16603 + u8 method;
16604 + u8 manip;
16605 + u16 num_ifs;
16606 + u32 pad;
16607 +
16608 + u16 adv_max_dmat_entries;
16609 + u16 adv_max_mc_groups;
16610 + u16 adv_max_vlan_ids;
16611 + u16 pad1;
16612 +
16613 + u64 options;
16614 +};
16615 +
16616 +struct dpdmux_cmd_destroy {
16617 + u32 dpdmux_id;
16618 +};
16619 +
16620 +#define DPDMUX_ENABLE_SHIFT 0
16621 +#define DPDMUX_ENABLE_SIZE 1
16622 +
16623 +struct dpdmux_rsp_is_enabled {
16624 + u8 en;
16625 +};
16626 +
16627 +struct dpdmux_cmd_set_irq_enable {
16628 + u8 enable;
16629 + u8 pad[3];
16630 + u8 irq_index;
16631 +};
16632 +
16633 +struct dpdmux_cmd_get_irq_enable {
16634 + u32 pad;
16635 + u8 irq_index;
16636 +};
16637 +
16638 +struct dpdmux_rsp_get_irq_enable {
16639 + u8 enable;
16640 +};
16641 +
16642 +struct dpdmux_cmd_set_irq_mask {
16643 + u32 mask;
16644 + u8 irq_index;
16645 +};
16646 +
16647 +struct dpdmux_cmd_get_irq_mask {
16648 + u32 pad;
16649 + u8 irq_index;
16650 +};
16651 +
16652 +struct dpdmux_rsp_get_irq_mask {
16653 + u32 mask;
16654 +};
16655 +
16656 +struct dpdmux_cmd_get_irq_status {
16657 + u32 status;
16658 + u8 irq_index;
16659 +};
16660 +
16661 +struct dpdmux_rsp_get_irq_status {
16662 + u32 status;
16663 +};
16664 +
16665 +struct dpdmux_cmd_clear_irq_status {
16666 + u32 status;
16667 + u8 irq_index;
16668 +};
16669 +
16670 +struct dpdmux_rsp_get_attr {
16671 + u8 method;
16672 + u8 manip;
16673 + u16 num_ifs;
16674 + u16 mem_size;
16675 + u16 pad;
16676 +
16677 + u64 pad1;
16678 +
16679 + u32 id;
16680 + u32 pad2;
16681 +
16682 + u64 options;
16683 +};
16684 +
16685 +struct dpdmux_cmd_set_max_frame_length {
16686 + u16 max_frame_length;
16687 +};
16688 +
16689 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0
16690 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4
16691 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4
16692 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4
16693 +
16694 +struct dpdmux_cmd_if_set_accepted_frames {
16695 + u16 if_id;
16696 + u8 frames_options;
16697 +};
16698 +
16699 +struct dpdmux_cmd_if {
16700 + u16 if_id;
16701 +};
16702 +
16703 +struct dpdmux_rsp_if_get_attr {
16704 + u8 pad[3];
16705 + u8 enabled;
16706 + u8 pad1[3];
16707 + u8 accepted_frames_type;
16708 + u32 rate;
16709 +};
16710 +
16711 +struct dpdmux_cmd_if_l2_rule {
16712 + u16 if_id;
16713 + u8 mac_addr5;
16714 + u8 mac_addr4;
16715 + u8 mac_addr3;
16716 + u8 mac_addr2;
16717 + u8 mac_addr1;
16718 + u8 mac_addr0;
16719 +
16720 + u32 pad;
16721 + u16 vlan_id;
16722 +};
16723 +
16724 +struct dpdmux_cmd_if_get_counter {
16725 + u16 if_id;
16726 + u8 counter_type;
16727 +};
16728 +
16729 +struct dpdmux_rsp_if_get_counter {
16730 + u64 pad;
16731 + u64 counter;
16732 +};
16733 +
16734 +struct dpdmux_cmd_if_set_link_cfg {
16735 + u16 if_id;
16736 + u16 pad[3];
16737 +
16738 + u32 rate;
16739 + u32 pad1;
16740 +
16741 + u64 options;
16742 +};
16743 +
16744 +struct dpdmux_cmd_if_get_link_state {
16745 + u16 if_id;
16746 +};
16747 +
16748 +struct dpdmux_rsp_if_get_link_state {
16749 + u32 pad;
16750 + u8 up;
16751 + u8 pad1[3];
16752 +
16753 + u32 rate;
16754 + u32 pad2;
16755 +
16756 + u64 options;
16757 +};
16758 +
16759 +struct dpdmux_rsp_get_api_version {
16760 + u16 major;
16761 + u16 minor;
16762 +};
16763 +
16764 +struct dpdmux_set_custom_key {
16765 + u64 pad[6];
16766 + u64 key_cfg_iova;
16767 +};
16768 +
16769 +struct dpdmux_cmd_add_custom_cls_entry {
16770 + u8 pad[3];
16771 + u8 key_size;
16772 + u16 pad1;
16773 + u16 dest_if;
16774 + u64 key_iova;
16775 + u64 mask_iova;
16776 +};
16777 +
16778 +struct dpdmux_cmd_remove_custom_cls_entry {
16779 + u8 pad[3];
16780 + u8 key_size;
16781 + u32 pad1;
16782 + u64 key_iova;
16783 + u64 mask_iova;
16784 +};
16785 +
16786 +#endif /* _FSL_DPDMUX_CMD_H */
16787 --- /dev/null
16788 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
16789 @@ -0,0 +1,1112 @@
16790 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
16791 + *
16792 + * Redistribution and use in source and binary forms, with or without
16793 + * modification, are permitted provided that the following conditions are met:
16794 + * * Redistributions of source code must retain the above copyright
16795 + * notice, this list of conditions and the following disclaimer.
16796 + * * Redistributions in binary form must reproduce the above copyright
16797 + * notice, this list of conditions and the following disclaimer in the
16798 + * documentation and/or other materials provided with the distribution.
16799 + * * Neither the name of the above-listed copyright holders nor the
16800 + * names of any contributors may be used to endorse or promote products
16801 + * derived from this software without specific prior written permission.
16802 + *
16803 + *
16804 + * ALTERNATIVELY, this software may be distributed under the terms of the
16805 + * GNU General Public License ("GPL") as published by the Free Software
16806 + * Foundation, either version 2 of that License or (at your option) any
16807 + * later version.
16808 + *
16809 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16810 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16811 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16812 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
16813 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16814 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16815 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
16816 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
16817 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
16818 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
16819 + * POSSIBILITY OF SUCH DAMAGE.
16820 + */
16821 +#include "../../fsl-mc/include/mc-sys.h"
16822 +#include "../../fsl-mc/include/mc-cmd.h"
16823 +#include "dpdmux.h"
16824 +#include "dpdmux-cmd.h"
16825 +
16826 +/**
16827 + * dpdmux_open() - Open a control session for the specified object
16828 + * @mc_io: Pointer to MC portal's I/O object
16829 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16830 + * @dpdmux_id: DPDMUX unique ID
16831 + * @token: Returned token; use in subsequent API calls
16832 + *
16833 + * This function can be used to open a control session for an
16834 + * already created object; an object may have been declared in
16835 + * the DPL or by calling the dpdmux_create() function.
16836 + * This function returns a unique authentication token,
16837 + * associated with the specific object ID and the specific MC
16838 + * portal; this token must be used in all subsequent commands for
16839 + * this specific object.
16840 + *
16841 + * Return: '0' on Success; Error code otherwise.
16842 + */
16843 +int dpdmux_open(struct fsl_mc_io *mc_io,
16844 + u32 cmd_flags,
16845 + int dpdmux_id,
16846 + u16 *token)
16847 +{
16848 + struct mc_command cmd = { 0 };
16849 + struct dpdmux_cmd_open *cmd_params;
16850 + int err;
16851 +
16852 + /* prepare command */
16853 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
16854 + cmd_flags,
16855 + 0);
16856 + cmd_params = (struct dpdmux_cmd_open *)cmd.params;
16857 + cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id);
16858 +
16859 + /* send command to mc*/
16860 + err = mc_send_command(mc_io, &cmd);
16861 + if (err)
16862 + return err;
16863 +
16864 + /* retrieve response parameters */
16865 + *token = mc_cmd_hdr_read_token(&cmd);
16866 +
16867 + return 0;
16868 +}
16869 +
16870 +/**
16871 + * dpdmux_close() - Close the control session of the object
16872 + * @mc_io: Pointer to MC portal's I/O object
16873 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16874 + * @token: Token of DPDMUX object
16875 + *
16876 + * After this function is called, no further operations are
16877 + * allowed on the object without opening a new control session.
16878 + *
16879 + * Return: '0' on Success; Error code otherwise.
16880 + */
16881 +int dpdmux_close(struct fsl_mc_io *mc_io,
16882 + u32 cmd_flags,
16883 + u16 token)
16884 +{
16885 + struct mc_command cmd = { 0 };
16886 +
16887 + /* prepare command */
16888 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
16889 + cmd_flags,
16890 + token);
16891 +
16892 + /* send command to mc*/
16893 + return mc_send_command(mc_io, &cmd);
16894 +}
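/* Illustrative sketch (editor addition, not part of the patch): the
 * open/close pair above brackets every DPDMUX control session. The helper
 * below is hypothetical and only shows the intended call ordering, assuming
 * the caller already owns an MC portal ('mc_io') and knows the object id
 * from the probed fsl_mc_device.
 */
static int example_dpdmux_session(struct fsl_mc_io *mc_io, int dpdmux_id)
{
	u16 token;
	int err;

	err = dpdmux_open(mc_io, 0, dpdmux_id, &token);
	if (err)
		return err;

	/* issue commands against 'token', e.g. enable the demux */
	err = dpdmux_enable(mc_io, 0, token);

	/* end the control session */
	dpdmux_close(mc_io, 0, token);
	return err;
}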
16895 +
16896 +/**
16897 + * dpdmux_create() - Create the DPDMUX object
16898 + * @mc_io: Pointer to MC portal's I/O object
16899 + * @dprc_token: Parent container token; '0' for default container
16900 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16901 + * @cfg: Configuration structure
16902 + * @obj_id: returned object id
16903 + *
16904 + * Create the DPDMUX object, allocate required resources and
16905 + * perform required initialization.
16906 + *
16907 + * The object can be created either by declaring it in the
16908 + * DPL file, or by calling this function.
16909 + *
16910 + * The function accepts an authentication token of a parent
16911 + * container that this object should be assigned to. The token
16912 + * can be '0' so the object will be assigned to the default container.
16913 + * The newly created object can be opened with the returned
16914 + * object id and using the container's associated tokens and MC portals.
16915 + *
16916 + * Return: '0' on Success; Error code otherwise.
16917 + */
16918 +int dpdmux_create(struct fsl_mc_io *mc_io,
16919 + u16 dprc_token,
16920 + u32 cmd_flags,
16921 + const struct dpdmux_cfg *cfg,
16922 + u32 *obj_id)
16923 +{
16924 + struct mc_command cmd = { 0 };
16925 + struct dpdmux_cmd_create *cmd_params;
16926 + int err;
16927 +
16928 + /* prepare command */
16929 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
16930 + cmd_flags,
16931 + dprc_token);
16932 + cmd_params = (struct dpdmux_cmd_create *)cmd.params;
16933 + cmd_params->method = cfg->method;
16934 + cmd_params->manip = cfg->manip;
16935 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
16936 + cmd_params->adv_max_dmat_entries =
16937 + cpu_to_le16(cfg->adv.max_dmat_entries);
16938 + cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
16939 + cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
16940 + cmd_params->options = cpu_to_le64(cfg->adv.options);
16941 +
16942 + /* send command to mc*/
16943 + err = mc_send_command(mc_io, &cmd);
16944 + if (err)
16945 + return err;
16946 +
16947 + /* retrieve response parameters */
16948 + *obj_id = mc_cmd_hdr_read_token(&cmd);
16949 +
16950 + return 0;
16951 +}
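/* Illustrative sketch (editor addition, not part of the patch): a minimal
 * dpdmux_cfg for creating a two-downlink demux using the MAC method, with
 * the advanced parameters left zeroed so the documented defaults apply.
 * 'dprc_token' is assumed to come from the parent container; '0' would
 * select the default container as described above.
 */
static int example_dpdmux_create(struct fsl_mc_io *mc_io, u16 dprc_token,
				 u32 *obj_id)
{
	struct dpdmux_cfg cfg = {
		.method = DPDMUX_METHOD_MAC,
		.manip = DPDMUX_MANIP_NONE,
		.num_ifs = 2,	/* downlinks only; uplink is not counted */
	};

	return dpdmux_create(mc_io, dprc_token, 0, &cfg, obj_id);
}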
16952 +
16953 +/**
16954 + * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
16955 + * @mc_io: Pointer to MC portal's I/O object
16956 + * @dprc_token: Parent container token; '0' for default container
16957 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16958 + * @object_id: The object id; it must be a valid id within the container that
16959 + * created this object;
16960 + *
16961 + * The function accepts the authentication token of the parent container that
16962 + * created the object (not the one that currently owns the object). The object
16963 + * is searched for within the parent container using the provided 'object_id'.
16964 + * All tokens to the object must be closed before calling destroy.
16965 + *
16966 + * Return: '0' on Success; error code otherwise.
16967 + */
16968 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
16969 + u16 dprc_token,
16970 + u32 cmd_flags,
16971 + u32 object_id)
16972 +{
16973 + struct mc_command cmd = { 0 };
16974 + struct dpdmux_cmd_destroy *cmd_params;
16975 +
16976 + /* prepare command */
16977 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
16978 + cmd_flags,
16979 + dprc_token);
16980 + cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
16981 + cmd_params->dpdmux_id = cpu_to_le32(object_id);
16982 +
16983 + /* send command to mc*/
16984 + return mc_send_command(mc_io, &cmd);
16985 +}
16986 +
16987 +/**
16988 + * dpdmux_enable() - Enable DPDMUX functionality
16989 + * @mc_io: Pointer to MC portal's I/O object
16990 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16991 + * @token: Token of DPDMUX object
16992 + *
16993 + * Return: '0' on Success; Error code otherwise.
16994 + */
16995 +int dpdmux_enable(struct fsl_mc_io *mc_io,
16996 + u32 cmd_flags,
16997 + u16 token)
16998 +{
16999 + struct mc_command cmd = { 0 };
17000 +
17001 + /* prepare command */
17002 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
17003 + cmd_flags,
17004 + token);
17005 +
17006 + /* send command to mc*/
17007 + return mc_send_command(mc_io, &cmd);
17008 +}
17009 +
17010 +/**
17011 + * dpdmux_disable() - Disable DPDMUX functionality
17012 + * @mc_io: Pointer to MC portal's I/O object
17013 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17014 + * @token: Token of DPDMUX object
17015 + *
17016 + * Return: '0' on Success; Error code otherwise.
17017 + */
17018 +int dpdmux_disable(struct fsl_mc_io *mc_io,
17019 + u32 cmd_flags,
17020 + u16 token)
17021 +{
17022 + struct mc_command cmd = { 0 };
17023 +
17024 + /* prepare command */
17025 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
17026 + cmd_flags,
17027 + token);
17028 +
17029 + /* send command to mc*/
17030 + return mc_send_command(mc_io, &cmd);
17031 +}
17032 +
17033 +/**
17034 + * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
17035 + * @mc_io: Pointer to MC portal's I/O object
17036 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17037 + * @token: Token of DPDMUX object
17038 + * @en: Returns '1' if object is enabled; '0' otherwise
17039 + *
17040 + * Return: '0' on Success; Error code otherwise.
17041 + */
17042 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
17043 + u32 cmd_flags,
17044 + u16 token,
17045 + int *en)
17046 +{
17047 + struct mc_command cmd = { 0 };
17048 + struct dpdmux_rsp_is_enabled *rsp_params;
17049 + int err;
17050 +
17051 + /* prepare command */
17052 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
17053 + cmd_flags,
17054 + token);
17055 +
17056 + /* send command to mc*/
17057 + err = mc_send_command(mc_io, &cmd);
17058 + if (err)
17059 + return err;
17060 +
17061 + /* retrieve response parameters */
17062 + rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
17063 + *en = dpdmux_get_field(rsp_params->en, ENABLE);
17064 +
17065 + return 0;
17066 +}
17067 +
17068 +/**
17069 + * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
17070 + * @mc_io: Pointer to MC portal's I/O object
17071 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17072 + * @token: Token of DPDMUX object
17073 + *
17074 + * Return: '0' on Success; Error code otherwise.
17075 + */
17076 +int dpdmux_reset(struct fsl_mc_io *mc_io,
17077 + u32 cmd_flags,
17078 + u16 token)
17079 +{
17080 + struct mc_command cmd = { 0 };
17081 +
17082 + /* prepare command */
17083 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
17084 + cmd_flags,
17085 + token);
17086 +
17087 + /* send command to mc*/
17088 + return mc_send_command(mc_io, &cmd);
17089 +}
17090 +
17091 +/**
17092 + * dpdmux_set_irq_enable() - Set overall interrupt state.
17093 + * @mc_io: Pointer to MC portal's I/O object
17094 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17095 + * @token: Token of DPDMUX object
17096 + * @irq_index: The interrupt index to configure
17097 + * @en: Interrupt state - enable = 1, disable = 0
17098 + *
17099 + * Allows GPP software to control when interrupts are generated.
17100 + * Each interrupt can have up to 32 causes. The enable/disable setting controls
17101 + * the overall interrupt state. If the interrupt is disabled, no cause will
17102 + * trigger an interrupt.
17103 + *
17104 + * Return: '0' on Success; Error code otherwise.
17105 + */
17106 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
17107 + u32 cmd_flags,
17108 + u16 token,
17109 + u8 irq_index,
17110 + u8 en)
17111 +{
17112 + struct mc_command cmd = { 0 };
17113 + struct dpdmux_cmd_set_irq_enable *cmd_params;
17114 +
17115 + /* prepare command */
17116 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
17117 + cmd_flags,
17118 + token);
17119 + cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params;
17120 + cmd_params->enable = en;
17121 + cmd_params->irq_index = irq_index;
17122 +
17123 + /* send command to mc*/
17124 + return mc_send_command(mc_io, &cmd);
17125 +}
17126 +
17127 +/**
17128 + * dpdmux_get_irq_enable() - Get overall interrupt state.
17129 + * @mc_io: Pointer to MC portal's I/O object
17130 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17131 + * @token: Token of DPDMUX object
17132 + * @irq_index: The interrupt index to configure
17133 + * @en: Returned interrupt state - enable = 1, disable = 0
17134 + *
17135 + * Return: '0' on Success; Error code otherwise.
17136 + */
17137 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
17138 + u32 cmd_flags,
17139 + u16 token,
17140 + u8 irq_index,
17141 + u8 *en)
17142 +{
17143 + struct mc_command cmd = { 0 };
17144 + struct dpdmux_cmd_get_irq_enable *cmd_params;
17145 + struct dpdmux_rsp_get_irq_enable *rsp_params;
17146 + int err;
17147 +
17148 + /* prepare command */
17149 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
17150 + cmd_flags,
17151 + token);
17152 + cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params;
17153 + cmd_params->irq_index = irq_index;
17154 +
17155 + /* send command to mc*/
17156 + err = mc_send_command(mc_io, &cmd);
17157 + if (err)
17158 + return err;
17159 +
17160 + /* retrieve response parameters */
17161 + rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params;
17162 + *en = rsp_params->enable;
17163 +
17164 + return 0;
17165 +}
17166 +
17167 +/**
17168 + * dpdmux_set_irq_mask() - Set interrupt mask.
17169 + * @mc_io: Pointer to MC portal's I/O object
17170 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17171 + * @token: Token of DPDMUX object
17172 + * @irq_index: The interrupt index to configure
17173 + * @mask: event mask to trigger interrupt;
17174 + * each bit:
17175 + * 0 = ignore event
17176 + * 1 = consider event for asserting IRQ
17177 + *
17178 + * Every interrupt can have up to 32 causes and the interrupt model supports
17179 + * masking/unmasking each cause independently
17180 + *
17181 + * Return: '0' on Success; Error code otherwise.
17182 + */
17183 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
17184 + u32 cmd_flags,
17185 + u16 token,
17186 + u8 irq_index,
17187 + u32 mask)
17188 +{
17189 + struct mc_command cmd = { 0 };
17190 + struct dpdmux_cmd_set_irq_mask *cmd_params;
17191 +
17192 + /* prepare command */
17193 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
17194 + cmd_flags,
17195 + token);
17196 + cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params;
17197 + cmd_params->mask = cpu_to_le32(mask);
17198 + cmd_params->irq_index = irq_index;
17199 +
17200 + /* send command to mc*/
17201 + return mc_send_command(mc_io, &cmd);
17202 +}
17203 +
17204 +/**
17205 + * dpdmux_get_irq_mask() - Get interrupt mask.
17206 + * @mc_io: Pointer to MC portal's I/O object
17207 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17208 + * @token: Token of DPDMUX object
17209 + * @irq_index: The interrupt index to configure
17210 + * @mask: Returned event mask to trigger interrupt
17211 + *
17212 + * Every interrupt can have up to 32 causes and the interrupt model supports
17213 + * masking/unmasking each cause independently
17214 + *
17215 + * Return: '0' on Success; Error code otherwise.
17216 + */
17217 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
17218 + u32 cmd_flags,
17219 + u16 token,
17220 + u8 irq_index,
17221 + u32 *mask)
17222 +{
17223 + struct mc_command cmd = { 0 };
17224 + struct dpdmux_cmd_get_irq_mask *cmd_params;
17225 + struct dpdmux_rsp_get_irq_mask *rsp_params;
17226 + int err;
17227 +
17228 + /* prepare command */
17229 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
17230 + cmd_flags,
17231 + token);
17232 + cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params;
17233 + cmd_params->irq_index = irq_index;
17234 +
17235 + /* send command to mc*/
17236 + err = mc_send_command(mc_io, &cmd);
17237 + if (err)
17238 + return err;
17239 +
17240 + /* retrieve response parameters */
17241 + rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params;
17242 + *mask = le32_to_cpu(rsp_params->mask);
17243 +
17244 + return 0;
17245 +}
17246 +
17247 +/**
17248 + * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
17249 + * @mc_io: Pointer to MC portal's I/O object
17250 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17251 + * @token: Token of DPDMUX object
17252 + * @irq_index: The interrupt index to configure
17253 + * @status: Returned interrupts status - one bit per cause:
17254 + * 0 = no interrupt pending
17255 + * 1 = interrupt pending
17256 + *
17257 + * Return: '0' on Success; Error code otherwise.
17258 + */
17259 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
17260 + u32 cmd_flags,
17261 + u16 token,
17262 + u8 irq_index,
17263 + u32 *status)
17264 +{
17265 + struct mc_command cmd = { 0 };
17266 + struct dpdmux_cmd_get_irq_status *cmd_params;
17267 + struct dpdmux_rsp_get_irq_status *rsp_params;
17268 + int err;
17269 +
17270 + /* prepare command */
17271 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
17272 + cmd_flags,
17273 + token);
17274 + cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params;
17275 + cmd_params->status = cpu_to_le32(*status);
17276 + cmd_params->irq_index = irq_index;
17277 +
17278 + /* send command to mc*/
17279 + err = mc_send_command(mc_io, &cmd);
17280 + if (err)
17281 + return err;
17282 +
17283 + /* retrieve response parameters */
17284 + rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params;
17285 + *status = le32_to_cpu(rsp_params->status);
17286 +
17287 + return 0;
17288 +}
17289 +
17290 +/**
17291 + * dpdmux_clear_irq_status() - Clear a pending interrupt's status
17292 + * @mc_io: Pointer to MC portal's I/O object
17293 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17294 + * @token: Token of DPDMUX object
17295 + * @irq_index: The interrupt index to configure
17296 + * @status: bits to clear (W1C) - one bit per cause:
17297 + * 0 = don't change
17298 + * 1 = clear status bit
17299 + *
17300 + * Return: '0' on Success; Error code otherwise.
17301 + */
17302 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
17303 + u32 cmd_flags,
17304 + u16 token,
17305 + u8 irq_index,
17306 + u32 status)
17307 +{
17308 + struct mc_command cmd = { 0 };
17309 + struct dpdmux_cmd_clear_irq_status *cmd_params;
17310 +
17311 + /* prepare command */
17312 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
17313 + cmd_flags,
17314 + token);
17315 + cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params;
17316 + cmd_params->status = cpu_to_le32(status);
17317 + cmd_params->irq_index = irq_index;
17318 +
17319 + /* send command to mc*/
17320 + return mc_send_command(mc_io, &cmd);
17321 +}
17322 +
17323 +/**
17324 + * dpdmux_get_attributes() - Retrieve DPDMUX attributes
17325 + * @mc_io: Pointer to MC portal's I/O object
17326 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17327 + * @token: Token of DPDMUX object
17328 + * @attr: Returned object's attributes
17329 + *
17330 + * Return: '0' on Success; Error code otherwise.
17331 + */
17332 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
17333 + u32 cmd_flags,
17334 + u16 token,
17335 + struct dpdmux_attr *attr)
17336 +{
17337 + struct mc_command cmd = { 0 };
17338 + struct dpdmux_rsp_get_attr *rsp_params;
17339 + int err;
17340 +
17341 + /* prepare command */
17342 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
17343 + cmd_flags,
17344 + token);
17345 +
17346 + /* send command to mc*/
17347 + err = mc_send_command(mc_io, &cmd);
17348 + if (err)
17349 + return err;
17350 +
17351 + /* retrieve response parameters */
17352 + rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params;
17353 + attr->id = le32_to_cpu(rsp_params->id);
17354 + attr->options = le64_to_cpu(rsp_params->options);
17355 + attr->method = rsp_params->method;
17356 + attr->manip = rsp_params->manip;
17357 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
17358 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
17359 +
17360 + return 0;
17361 +}
17362 +
17363 +/**
17364 + * dpdmux_if_enable() - Enable Interface
17365 + * @mc_io: Pointer to MC portal's I/O object
17366 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17367 + * @token: Token of DPDMUX object
17368 + * @if_id: Interface Identifier
17369 + *
17370 + * Return: Completion status. '0' on Success; Error code otherwise.
17371 + */
17372 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
17373 + u32 cmd_flags,
17374 + u16 token,
17375 + u16 if_id)
17376 +{
17377 + struct dpdmux_cmd_if *cmd_params;
17378 + struct mc_command cmd = { 0 };
17379 +
17380 + /* prepare command */
17381 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
17382 + cmd_flags,
17383 + token);
17384 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17385 + cmd_params->if_id = cpu_to_le16(if_id);
17386 +
17387 + /* send command to mc*/
17388 + return mc_send_command(mc_io, &cmd);
17389 +}
17390 +
17391 +/**
17392 + * dpdmux_if_disable() - Disable Interface
17393 + * @mc_io: Pointer to MC portal's I/O object
17394 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17395 + * @token: Token of DPDMUX object
17396 + * @if_id: Interface Identifier
17397 + *
17398 + * Return: Completion status. '0' on Success; Error code otherwise.
17399 + */
17400 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
17401 + u32 cmd_flags,
17402 + u16 token,
17403 + u16 if_id)
17404 +{
17405 + struct dpdmux_cmd_if *cmd_params;
17406 + struct mc_command cmd = { 0 };
17407 +
17408 + /* prepare command */
17409 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
17410 + cmd_flags,
17411 + token);
17412 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17413 + cmd_params->if_id = cpu_to_le16(if_id);
17414 +
17415 + /* send command to mc*/
17416 + return mc_send_command(mc_io, &cmd);
17417 +}
17418 +
17419 +/**
17420 + * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
17421 + * @mc_io: Pointer to MC portal's I/O object
17422 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17423 + * @token: Token of DPDMUX object
17424 + * @max_frame_length: The required maximum frame length
17425 + *
17426 + * Update the maximum frame length on all DMUX interfaces.
17427 + * In VEPA mode, the maximum frame length on all DMUX interfaces is
17428 + * updated to the minimum of the MFLs of the connected DPNIs and the
17429 + * current DMUX MFL.
17430 + *
17431 + * Return: '0' on Success; Error code otherwise.
17432 + */
17433 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
17434 + u32 cmd_flags,
17435 + u16 token,
17436 + u16 max_frame_length)
17437 +{
17438 + struct mc_command cmd = { 0 };
17439 + struct dpdmux_cmd_set_max_frame_length *cmd_params;
17440 +
17441 + /* prepare command */
17442 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
17443 + cmd_flags,
17444 + token);
17445 + cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
17446 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
17447 +
17448 + /* send command to mc*/
17449 + return mc_send_command(mc_io, &cmd);
17450 +}
17451 +
17452 +/**
17453 + * dpdmux_ul_reset_counters() - Function resets the uplink counter
17454 + * @mc_io: Pointer to MC portal's I/O object
17455 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17456 + * @token: Token of DPDMUX object
17457 + *
17458 + * Return: '0' on Success; Error code otherwise.
17459 + */
17460 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
17461 + u32 cmd_flags,
17462 + u16 token)
17463 +{
17464 + struct mc_command cmd = { 0 };
17465 +
17466 + /* prepare command */
17467 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
17468 + cmd_flags,
17469 + token);
17470 +
17471 + /* send command to mc*/
17472 + return mc_send_command(mc_io, &cmd);
17473 +}
17474 +
17475 +/**
17476 + * dpdmux_if_set_accepted_frames() - Set the accepted frame types
17477 + * @mc_io: Pointer to MC portal's I/O object
17478 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17479 + * @token: Token of DPDMUX object
17480 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
17481 + * @cfg: Frame types configuration
17482 + *
17483 + * If 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
17484 + * priority-tagged frames are discarded.
17485 + * If 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
17486 + * priority-tagged frames are accepted.
17487 + * If 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN-tagged,
17488 + * untagged and priority-tagged frames are accepted.
17489 + *
17490 + * Return: '0' on Success; Error code otherwise.
17491 + */
17492 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
17493 + u32 cmd_flags,
17494 + u16 token,
17495 + u16 if_id,
17496 + const struct dpdmux_accepted_frames *cfg)
17497 +{
17498 + struct mc_command cmd = { 0 };
17499 + struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
17500 +
17501 + /* prepare command */
17502 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
17503 + cmd_flags,
17504 + token);
17505 + cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
17506 + cmd_params->if_id = cpu_to_le16(if_id);
17507 + dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE,
17508 + cfg->type);
17509 + dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION,
17510 + cfg->unaccept_act);
17511 +
17512 + /* send command to mc*/
17513 + return mc_send_command(mc_io, &cmd);
17514 +}
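/* Illustrative sketch (editor addition, not part of the patch): admit only
 * VLAN-tagged frames on one interface and drop everything else, using the
 * types from dpdmux.h. 'token' is assumed to come from a prior
 * dpdmux_open(); interface 0 would be the uplink.
 */
static int example_dpdmux_vlan_only(struct fsl_mc_io *mc_io, u16 token,
				    u16 if_id)
{
	struct dpdmux_accepted_frames cfg = {
		.type = DPDMUX_ADMIT_ONLY_VLAN_TAGGED,
		.unaccept_act = DPDMUX_ACTION_DROP,
	};

	return dpdmux_if_set_accepted_frames(mc_io, 0, token, if_id, &cfg);
}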
17515 +
17516 +/**
17517 + * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
17518 + * @mc_io: Pointer to MC portal's I/O object
17519 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17520 + * @token: Token of DPDMUX object
17521 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
17522 + * @attr: Interface attributes
17523 + *
17524 + * Return: '0' on Success; Error code otherwise.
17525 + */
17526 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
17527 + u32 cmd_flags,
17528 + u16 token,
17529 + u16 if_id,
17530 + struct dpdmux_if_attr *attr)
17531 +{
17532 + struct mc_command cmd = { 0 };
17533 + struct dpdmux_cmd_if *cmd_params;
17534 + struct dpdmux_rsp_if_get_attr *rsp_params;
17535 + int err;
17536 +
17537 + /* prepare command */
17538 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
17539 + cmd_flags,
17540 + token);
17541 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17542 + cmd_params->if_id = cpu_to_le16(if_id);
17543 +
17544 + /* send command to mc*/
17545 + err = mc_send_command(mc_io, &cmd);
17546 + if (err)
17547 + return err;
17548 +
17549 + /* retrieve response parameters */
17550 + rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
17551 + attr->rate = le32_to_cpu(rsp_params->rate);
17552 + attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
17553 + attr->accept_frame_type =
17554 + dpdmux_get_field(rsp_params->accepted_frames_type,
17555 + ACCEPTED_FRAMES_TYPE);
17556 +
17557 + return 0;
17558 +}
17559 +
17560 +/**
17561 + * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
17562 + * @mc_io: Pointer to MC portal's I/O object
17563 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17564 + * @token: Token of DPDMUX object
17565 + * @if_id: Destination interface ID
17566 + * @rule: L2 rule
17567 + *
17568 + * Function removes an L2 rule from the DPDMUX table
17569 + * or removes an interface from an existing multicast address
17570 + *
17571 + * Return: '0' on Success; Error code otherwise.
17572 + */
17573 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
17574 + u32 cmd_flags,
17575 + u16 token,
17576 + u16 if_id,
17577 + const struct dpdmux_l2_rule *rule)
17578 +{
17579 + struct mc_command cmd = { 0 };
17580 + struct dpdmux_cmd_if_l2_rule *cmd_params;
17581 +
17582 + /* prepare command */
17583 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
17584 + cmd_flags,
17585 + token);
17586 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
17587 + cmd_params->if_id = cpu_to_le16(if_id);
17588 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
17589 + cmd_params->mac_addr5 = rule->mac_addr[5];
17590 + cmd_params->mac_addr4 = rule->mac_addr[4];
17591 + cmd_params->mac_addr3 = rule->mac_addr[3];
17592 + cmd_params->mac_addr2 = rule->mac_addr[2];
17593 + cmd_params->mac_addr1 = rule->mac_addr[1];
17594 + cmd_params->mac_addr0 = rule->mac_addr[0];
17595 +
17596 + /* send command to mc*/
17597 + return mc_send_command(mc_io, &cmd);
17598 +}
17599 +
17600 +/**
17601 + * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
17602 + * @mc_io: Pointer to MC portal's I/O object
17603 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17604 + * @token: Token of DPDMUX object
17605 + * @if_id: Destination interface ID
17606 + * @rule: L2 rule
17607 + *
17608 + * Function adds an L2 rule into the DPDMUX table
17609 + * or adds an interface to an existing multicast address
17610 + *
17611 + * Return: '0' on Success; Error code otherwise.
17612 + */
17613 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
17614 + u32 cmd_flags,
17615 + u16 token,
17616 + u16 if_id,
17617 + const struct dpdmux_l2_rule *rule)
17618 +{
17619 + struct mc_command cmd = { 0 };
17620 + struct dpdmux_cmd_if_l2_rule *cmd_params;
17621 +
17622 + /* prepare command */
17623 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
17624 + cmd_flags,
17625 + token);
17626 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
17627 + cmd_params->if_id = cpu_to_le16(if_id);
17628 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
17629 + cmd_params->mac_addr5 = rule->mac_addr[5];
17630 + cmd_params->mac_addr4 = rule->mac_addr[4];
17631 + cmd_params->mac_addr3 = rule->mac_addr[3];
17632 + cmd_params->mac_addr2 = rule->mac_addr[2];
17633 + cmd_params->mac_addr1 = rule->mac_addr[1];
17634 + cmd_params->mac_addr0 = rule->mac_addr[0];
17635 +
17636 + /* send command to mc*/
17637 + return mc_send_command(mc_io, &cmd);
17638 +}
17639 +
17640 +/**
17641 + * dpdmux_if_get_counter() - Function obtains a specific counter of an interface
17642 + * @mc_io: Pointer to MC portal's I/O object
17643 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17644 + * @token: Token of DPDMUX object
17645 + * @if_id: Interface Id
17646 + * @counter_type: counter type
17647 + * @counter: Returned specific counter information
17648 + *
17649 + * Return: '0' on Success; Error code otherwise.
17650 + */
17651 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
17652 + u32 cmd_flags,
17653 + u16 token,
17654 + u16 if_id,
17655 + enum dpdmux_counter_type counter_type,
17656 + u64 *counter)
17657 +{
17658 + struct mc_command cmd = { 0 };
17659 + struct dpdmux_cmd_if_get_counter *cmd_params;
17660 + struct dpdmux_rsp_if_get_counter *rsp_params;
17661 + int err;
17662 +
17663 + /* prepare command */
17664 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
17665 + cmd_flags,
17666 + token);
17667 + cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
17668 + cmd_params->if_id = cpu_to_le16(if_id);
17669 + cmd_params->counter_type = counter_type;
17670 +
17671 + /* send command to mc*/
17672 + err = mc_send_command(mc_io, &cmd);
17673 + if (err)
17674 + return err;
17675 +
17676 + /* retrieve response parameters */
17677 + rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
17678 + *counter = le64_to_cpu(rsp_params->counter);
17679 +
17680 + return 0;
17681 +}
17682 +
17683 +/**
17684 + * dpdmux_if_set_link_cfg() - set the link configuration.
17685 + * @mc_io: Pointer to MC portal's I/O object
17686 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17687 + * @token: Token of DPSW object
17688 + * @if_id: interface id
17689 + * @cfg: Link configuration
17690 + *
17691 + * Return: '0' on Success; Error code otherwise.
17692 + */
17693 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
17694 + u32 cmd_flags,
17695 + u16 token,
17696 + u16 if_id,
17697 + struct dpdmux_link_cfg *cfg)
17698 +{
17699 + struct mc_command cmd = { 0 };
17700 + struct dpdmux_cmd_if_set_link_cfg *cmd_params;
17701 +
17702 + /* prepare command */
17703 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
17704 + cmd_flags,
17705 + token);
17706 + cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
17707 + cmd_params->if_id = cpu_to_le16(if_id);
17708 + cmd_params->rate = cpu_to_le32(cfg->rate);
17709 + cmd_params->options = cpu_to_le64(cfg->options);
17710 +
17711 + /* send command to mc*/
17712 + return mc_send_command(mc_io, &cmd);
17713 +}
17714 +
17715 +/**
17716 + * dpdmux_if_get_link_state - Return the link state
17717 + * @mc_io: Pointer to MC portal's I/O object
17718 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17719 + * @token: Token of DPSW object
17720 + * @if_id: interface id
17721 + * @state: link state
17722 + *
17723 + * Return: '0' on Success; Error code otherwise.
17724 + */
17725 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
17726 + u32 cmd_flags,
17727 + u16 token,
17728 + u16 if_id,
17729 + struct dpdmux_link_state *state)
17730 +{
17731 + struct mc_command cmd = { 0 };
17732 + struct dpdmux_cmd_if_get_link_state *cmd_params;
17733 + struct dpdmux_rsp_if_get_link_state *rsp_params;
17734 + int err;
17735 +
17736 + /* prepare command */
17737 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
17738 + cmd_flags,
17739 + token);
17740 + cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
17741 + cmd_params->if_id = cpu_to_le16(if_id);
17742 +
17743 + /* send command to mc*/
17744 + err = mc_send_command(mc_io, &cmd);
17745 + if (err)
17746 + return err;
17747 +
17748 + /* retrieve response parameters */
17749 + rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
17750 + state->rate = le32_to_cpu(rsp_params->rate);
17751 + state->options = le64_to_cpu(rsp_params->options);
17752 + state->up = dpdmux_get_field(rsp_params->up, ENABLE);
17753 +
17754 + return 0;
17755 +}
17756 +
17757 +/**
17758 + * dpdmux_set_custom_key - Set a custom classification key.
17759 + *
17760 + * This API is only available for DPDMUX instance created with
17761 + * DPDMUX_METHOD_CUSTOM. This API must be called before populating the
17762 + * classification table using dpdmux_add_custom_cls_entry.
17763 + *
17764 + * Calls to dpdmux_set_custom_key remove all existing classification entries
17765 + * that may have been added previously using dpdmux_add_custom_cls_entry.
17766 + *
17767 + * @mc_io: Pointer to MC portal's I/O object
17768 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17769 + * @token: Token of DPSW object
17770 + * @if_id: interface id
17771 + * @key_cfg_iova: DMA address of a configuration structure set up using
17772 + * dpkg_prepare_key_cfg. Maximum key size is 24 bytes.
17773 + *
17774 + * Return: '0' on Success; Error code otherwise.
17775 + */
17776 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
17777 + u32 cmd_flags,
17778 + u16 token,
17779 + u64 key_cfg_iova)
17780 +{
17781 + struct dpdmux_set_custom_key *cmd_params;
17782 + struct mc_command cmd = { 0 };
17783 +
17784 + /* prepare command */
17785 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
17786 + cmd_flags,
17787 + token);
17788 + cmd_params = (struct dpdmux_set_custom_key *)cmd.params;
17789 + cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova);
17790 +
17791 + /* send command to mc*/
17792 + return mc_send_command(mc_io, &cmd);
17793 +}
17794 +
17795 +/**
17796 + * dpdmux_add_custom_cls_entry - Adds a custom classification entry.
17797 + *
17798 + * This API is only available for DPDMUX instances created with
17799 + * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key
17800 + * composition rule must be set up using dpdmux_set_custom_key.
17801 + *
17802 + * @mc_io: Pointer to MC portal's I/O object
17803 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17804 + * @token: Token of DPSW object
17805 + * @rule: Classification rule to insert. Rules cannot be duplicated, if a
17806 + * matching rule already exists, the action will be replaced.
17807 + * @action: Action to perform for matching traffic.
17808 + *
17809 + * Return: '0' on Success; Error code otherwise.
17810 + */
17811 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
17812 + u32 cmd_flags,
17813 + u16 token,
17814 + struct dpdmux_rule_cfg *rule,
17815 + struct dpdmux_cls_action *action)
17816 +{
17817 + struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
17818 + struct mc_command cmd = { 0 };
17819 +
17820 + /* prepare command */
17821 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
17822 + cmd_flags,
17823 + token);
17824 +
17825 + cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params;
17826 + cmd_params->key_size = rule->key_size;
17827 + cmd_params->dest_if = cpu_to_le16(action->dest_if);
17828 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
17829 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
17830 +
17831 + /* send command to mc*/
17832 + return mc_send_command(mc_io, &cmd);
17833 +}
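/* Illustrative sketch (editor addition, not part of the patch): the required
 * ordering for DPDMUX_METHOD_CUSTOM instances - the key composition must be
 * set before any classification entry is added. The three IOVAs are assumed
 * to be DMA-mapped buffers prepared by the caller (the key layout via
 * dpkg_prepare_key_cfg(), as noted in the kernel-doc above).
 */
static int example_dpdmux_custom_cls(struct fsl_mc_io *mc_io, u16 token,
				     u64 key_cfg_iova, u64 key_iova,
				     u64 mask_iova, u8 key_size, u16 dest_if)
{
	struct dpdmux_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};
	struct dpdmux_cls_action action = {
		.dest_if = dest_if,
	};
	int err;

	/* key composition first; this wipes any previously added entries */
	err = dpdmux_set_custom_key(mc_io, 0, token, key_cfg_iova);
	if (err)
		return err;

	return dpdmux_add_custom_cls_entry(mc_io, 0, token, &rule, &action);
}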
17834 +
17835 +/**
17836 + * dpdmux_remove_custom_cls_entry - Removes a custom classification entry.
17837 + *
17838 + * This API is only available for DPDMUX instances created with
17839 + * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification
17840 + * entries previously inserted using dpdmux_add_custom_cls_entry.
17841 + *
17842 + * @mc_io: Pointer to MC portal's I/O object
17843 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17844 + * @token: Token of DPSW object
17845 + * @rule: Classification rule to remove
17846 + *
17847 + * Return: '0' on Success; Error code otherwise.
17848 + */
17849 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
17850 + u32 cmd_flags,
17851 + u16 token,
17852 + struct dpdmux_rule_cfg *rule)
17853 +{
17854 + struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
17855 + struct mc_command cmd = { 0 };
17856 +
17857 + /* prepare command */
17858 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
17859 + cmd_flags,
17860 + token);
17861 + cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
17862 + cmd_params->key_size = rule->key_size;
17863 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
17864 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
17865 +
17866 + /* send command to mc*/
17867 + return mc_send_command(mc_io, &cmd);
17868 +}
17869 +
17870 +/**
17871 + * dpdmux_get_api_version() - Get Data Path Demux API version
17872 + * @mc_io: Pointer to MC portal's I/O object
17873 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17874 + * @major_ver: Major version of data path demux API
17875 + * @minor_ver: Minor version of data path demux API
17876 + *
17877 + * Return: '0' on Success; Error code otherwise.
17878 + */
17879 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
17880 + u32 cmd_flags,
17881 + u16 *major_ver,
17882 + u16 *minor_ver)
17883 +{
17884 + struct mc_command cmd = { 0 };
17885 + struct dpdmux_rsp_get_api_version *rsp_params;
17886 + int err;
17887 +
17888 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
17889 + cmd_flags,
17890 + 0);
17891 +
17892 + err = mc_send_command(mc_io, &cmd);
17893 + if (err)
17894 + return err;
17895 +
17896 + rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
17897 + *major_ver = le16_to_cpu(rsp_params->major);
17898 + *minor_ver = le16_to_cpu(rsp_params->minor);
17899 +
17900 + return 0;
17901 +}
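/* Illustrative sketch (editor addition, not part of the patch): a driver
 * would typically gate itself on the reported DPDMUX API version, as the
 * EVB driver later in this patch does against its minimum supported
 * version. The required major number here is a placeholder.
 */
static int example_dpdmux_check_api(struct fsl_mc_io *mc_io)
{
	u16 major, minor;
	int err;

	err = dpdmux_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	if (major < 6 /* placeholder minimum supported major version */)
		return -ENOTSUPP;

	return 0;
}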
17902 --- /dev/null
17903 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
17904 @@ -0,0 +1,453 @@
17905 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
17906 + *
17907 + * Redistribution and use in source and binary forms, with or without
17908 + * modification, are permitted provided that the following conditions are met:
17909 + * * Redistributions of source code must retain the above copyright
17910 + * notice, this list of conditions and the following disclaimer.
17911 + * * Redistributions in binary form must reproduce the above copyright
17912 + * notice, this list of conditions and the following disclaimer in the
17913 + * documentation and/or other materials provided with the distribution.
17914 + * * Neither the name of the above-listed copyright holders nor the
17915 + * names of any contributors may be used to endorse or promote products
17916 + * derived from this software without specific prior written permission.
17917 + *
17918 + *
17919 + * ALTERNATIVELY, this software may be distributed under the terms of the
17920 + * GNU General Public License ("GPL") as published by the Free Software
17921 + * Foundation, either version 2 of that License or (at your option) any
17922 + * later version.
17923 + *
17924 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17925 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17926 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17927 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
17928 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17929 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
17930 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
17931 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
17932 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
17933 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17934 + * POSSIBILITY OF SUCH DAMAGE.
17935 + */
17936 +#ifndef __FSL_DPDMUX_H
17937 +#define __FSL_DPDMUX_H
17938 +
17939 +struct fsl_mc_io;
17940 +
17941 +/* Data Path Demux API
17942 + * Contains API for handling DPDMUX topology and functionality
17943 + */
17944 +
17945 +int dpdmux_open(struct fsl_mc_io *mc_io,
17946 + u32 cmd_flags,
17947 + int dpdmux_id,
17948 + u16 *token);
17949 +
17950 +int dpdmux_close(struct fsl_mc_io *mc_io,
17951 + u32 cmd_flags,
17952 + u16 token);
17953 +
17954 +/**
17955 + * DPDMUX general options
17956 + */
17957 +
17958 +/**
17959 + * Enable bridging between internal interfaces
17960 + */
17961 +#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
17962 +
17963 +/**
17964 + * Mask support for classification
17965 + */
17966 +#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL
17967 +
17968 +#define DPDMUX_IRQ_INDEX_IF 0x0000
17969 +#define DPDMUX_IRQ_INDEX 0x0001
17970 +
17971 +/**
17972 + * IRQ event - Indicates that the link state changed
17973 + */
17974 +#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
17975 +
17976 +/**
17977 + * enum dpdmux_manip - DPDMUX manipulation operations
17978 + * @DPDMUX_MANIP_NONE: No manipulation on frames
17979 + * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
17980 + */
17981 +enum dpdmux_manip {
17982 + DPDMUX_MANIP_NONE = 0x0,
17983 + DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
17984 +};
17985 +
17986 +/**
17987 + * enum dpdmux_method - DPDMUX method options
17988 + * @DPDMUX_METHOD_NONE: no DPDMUX method
17989 + * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
17990 + * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
17991 + * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
17992 + * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
17993 + */
17994 +enum dpdmux_method {
17995 + DPDMUX_METHOD_NONE = 0x0,
17996 + DPDMUX_METHOD_C_VLAN_MAC = 0x1,
17997 + DPDMUX_METHOD_MAC = 0x2,
17998 + DPDMUX_METHOD_C_VLAN = 0x3,
17999 + DPDMUX_METHOD_S_VLAN = 0x4,
18000 + DPDMUX_METHOD_CUSTOM = 0x5
18001 +};
18002 +
18003 +/**
18004 + * struct dpdmux_cfg - DPDMUX configuration parameters
18005 + * @method: Defines the operation method for the DPDMUX address table
18006 + * @manip: Required manipulation operation
18007 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18008 + * @adv: Advanced parameters; default is all zeros;
18009 + * use this structure to change default settings
18010 + */
18011 +struct dpdmux_cfg {
18012 + enum dpdmux_method method;
18013 + enum dpdmux_manip manip;
18014 + u16 num_ifs;
18015 + /**
18016 + * struct adv - Advanced parameters
18017 + * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
18018 + * @max_dmat_entries: Maximum entries in DPDMUX address table
18019 + * 0 - indicates default: 64 entries per interface.
18020 + * @max_mc_groups: Number of multicast groups in DPDMUX table
18021 + * 0 - indicates default: 32 multicast groups
18022 + * @max_vlan_ids: Maximum number of VLAN IDs allowed in the system -
18023 + * relevant only when working in the mac+vlan method.
18024 + * 0 - indicates default: 16 vlan ids.
18025 + */
18026 + struct {
18027 + u64 options;
18028 + u16 max_dmat_entries;
18029 + u16 max_mc_groups;
18030 + u16 max_vlan_ids;
18031 + } adv;
18032 +};
18033 +
18034 +int dpdmux_create(struct fsl_mc_io *mc_io,
18035 + u16 dprc_token,
18036 + u32 cmd_flags,
18037 + const struct dpdmux_cfg *cfg,
18038 + u32 *obj_id);
18039 +
18040 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
18041 + u16 dprc_token,
18042 + u32 cmd_flags,
18043 + u32 object_id);
18044 +
18045 +int dpdmux_enable(struct fsl_mc_io *mc_io,
18046 + u32 cmd_flags,
18047 + u16 token);
18048 +
18049 +int dpdmux_disable(struct fsl_mc_io *mc_io,
18050 + u32 cmd_flags,
18051 + u16 token);
18052 +
18053 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
18054 + u32 cmd_flags,
18055 + u16 token,
18056 + int *en);
18057 +
18058 +int dpdmux_reset(struct fsl_mc_io *mc_io,
18059 + u32 cmd_flags,
18060 + u16 token);
18061 +
18062 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
18063 + u32 cmd_flags,
18064 + u16 token,
18065 + u8 irq_index,
18066 + u8 en);
18067 +
18068 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
18069 + u32 cmd_flags,
18070 + u16 token,
18071 + u8 irq_index,
18072 + u8 *en);
18073 +
18074 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
18075 + u32 cmd_flags,
18076 + u16 token,
18077 + u8 irq_index,
18078 + u32 mask);
18079 +
18080 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
18081 + u32 cmd_flags,
18082 + u16 token,
18083 + u8 irq_index,
18084 + u32 *mask);
18085 +
18086 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
18087 + u32 cmd_flags,
18088 + u16 token,
18089 + u8 irq_index,
18090 + u32 *status);
18091 +
18092 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
18093 + u32 cmd_flags,
18094 + u16 token,
18095 + u8 irq_index,
18096 + u32 status);
18097 +
18098 +/**
18099 + * struct dpdmux_attr - Structure representing DPDMUX attributes
18100 + * @id: DPDMUX object ID
18101 + * @options: Configuration options (bitmap)
18102 + * @method: DPDMUX address table method
18103 + * @manip: DPDMUX manipulation type
18104 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18105 + * @mem_size: DPDMUX frame storage memory size
18106 + */
18107 +struct dpdmux_attr {
18108 + int id;
18109 + u64 options;
18110 + enum dpdmux_method method;
18111 + enum dpdmux_manip manip;
18112 + u16 num_ifs;
18113 + u16 mem_size;
18114 +};
18115 +
18116 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
18117 + u32 cmd_flags,
18118 + u16 token,
18119 + struct dpdmux_attr *attr);
18120 +
18121 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
18122 + u32 cmd_flags,
18123 + u16 token,
18124 + u16 max_frame_length);
18125 +
18126 +/**
18127 + * enum dpdmux_counter_type - Counter types
18128 + * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
18129 + * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
18130 + * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
18131 + * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
18132 + * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
18133 + * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
18134 + * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
18135 + * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
18136 + * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
18137 + * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
18138 + * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
18139 + */
18140 +enum dpdmux_counter_type {
18141 + DPDMUX_CNT_ING_FRAME = 0x0,
18142 + DPDMUX_CNT_ING_BYTE = 0x1,
18143 + DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
18144 + DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
18145 + DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
18146 + DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
18147 + DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
18148 + DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
18149 + DPDMUX_CNT_EGR_FRAME = 0x8,
18150 + DPDMUX_CNT_EGR_BYTE = 0x9,
18151 + DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
18152 +};
18153 +
18154 +/**
18155 + * enum dpdmux_accepted_frames_type - DPDMUX frame types
18156 + * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
18157 + * priority-tagged frames
18158 + * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
18159 + * priority-tagged frames that are received on this
18160 + * interface
18161 + * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
18162 + * received on this interface are accepted
18163 + */
18164 +enum dpdmux_accepted_frames_type {
18165 + DPDMUX_ADMIT_ALL = 0,
18166 + DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
18167 + DPDMUX_ADMIT_ONLY_UNTAGGED = 2
18168 +};
18169 +
18170 +/**
18171 + * enum dpdmux_action - DPDMUX action for un-accepted frames
18172 + * @DPDMUX_ACTION_DROP: Drop un-accepted frames
18173 + * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
18174 + * control interface
18175 + */
18176 +enum dpdmux_action {
18177 + DPDMUX_ACTION_DROP = 0,
18178 + DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
18179 +};
18180 +
18181 +/**
18182 + * struct dpdmux_accepted_frames - Frame types configuration
18183 + * @type: Defines ingress accepted frames
18184 + * @unaccept_act: Defines action on frames not accepted
18185 + */
18186 +struct dpdmux_accepted_frames {
18187 + enum dpdmux_accepted_frames_type type;
18188 + enum dpdmux_action unaccept_act;
18189 +};
18190 +
18191 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
18192 + u32 cmd_flags,
18193 + u16 token,
18194 + u16 if_id,
18195 + const struct dpdmux_accepted_frames *cfg);
18196 +
18197 +/**
18198 + * struct dpdmux_if_attr - Structure representing frame types configuration
18199 + * @rate: Configured interface rate (in bits per second)
18200 + * @enabled: Indicates if interface is enabled
18201 + * @accept_frame_type: Indicates type of accepted frames for the interface
18202 + */
18203 +struct dpdmux_if_attr {
18204 + u32 rate;
18205 + int enabled;
18206 + enum dpdmux_accepted_frames_type accept_frame_type;
18207 +};
18208 +
18209 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
18210 + u32 cmd_flags,
18211 + u16 token,
18212 + u16 if_id,
18213 + struct dpdmux_if_attr *attr);
18214 +
18215 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
18216 + u32 cmd_flags,
18217 + u16 token,
18218 + u16 if_id);
18219 +
18220 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
18221 + u32 cmd_flags,
18222 + u16 token,
18223 + u16 if_id);
18224 +
18225 +/**
18226 + * struct dpdmux_l2_rule - Structure representing L2 rule
18227 + * @mac_addr: MAC address
18228 + * @vlan_id: VLAN ID
18229 + */
18230 +struct dpdmux_l2_rule {
18231 + u8 mac_addr[6];
18232 + u16 vlan_id;
18233 +};
18234 +
18235 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
18236 + u32 cmd_flags,
18237 + u16 token,
18238 + u16 if_id,
18239 + const struct dpdmux_l2_rule *rule);
18240 +
18241 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
18242 + u32 cmd_flags,
18243 + u16 token,
18244 + u16 if_id,
18245 + const struct dpdmux_l2_rule *rule);
18246 +
18247 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
18248 + u32 cmd_flags,
18249 + u16 token,
18250 + u16 if_id,
18251 + enum dpdmux_counter_type counter_type,
18252 + u64 *counter);
18253 +
18254 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
18255 + u32 cmd_flags,
18256 + u16 token);
18257 +
18258 +/**
18259 + * Enable auto-negotiation
18260 + */
18261 +#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
18262 +/**
18263 + * Enable half-duplex mode
18264 + */
18265 +#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
18266 +/**
18267 + * Enable pause frames
18268 + */
18269 +#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
18270 +/**
18271 + * Enable asymmetric pause frames
18272 + */
18273 +#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
18274 +
18275 +/**
18276 + * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
18277 + * @rate: Rate
18278 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18279 + */
18280 +struct dpdmux_link_cfg {
18281 + u32 rate;
18282 + u64 options;
18283 +};
18284 +
18285 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
18286 + u32 cmd_flags,
18287 + u16 token,
18288 + u16 if_id,
18289 + struct dpdmux_link_cfg *cfg);
18290 +/**
18291 + * struct dpdmux_link_state - Structure representing DPDMUX link state
18292 + * @rate: Rate
18293 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18294 + * @up: 0 - down, 1 - up
18295 + */
18296 +struct dpdmux_link_state {
18297 + u32 rate;
18298 + u64 options;
18299 + int up;
18300 +};
18301 +
18302 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
18303 + u32 cmd_flags,
18304 + u16 token,
18305 + u16 if_id,
18306 + struct dpdmux_link_state *state);
18307 +
18308 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
18309 + u32 cmd_flags,
18310 + u16 token,
18311 + u64 key_cfg_iova);
18312 +
18313 +/**
18314 + * struct dpdmux_rule_cfg - Custom classification rule.
18315 + *
18316 + * @key_iova: DMA address of buffer storing the look-up value
18317 + * @mask_iova: DMA address of the mask used for TCAM classification
18318 + * @key_size: size, in bytes, of the look-up value. This must match the size
18319 + * of the look-up key defined using dpdmux_set_custom_key, otherwise the
18320 + * entry will never be hit
18321 + */
18322 +struct dpdmux_rule_cfg {
18323 + u64 key_iova;
18324 + u64 mask_iova;
18325 + u8 key_size;
18326 +};
18327 +
18328 +/**
18329 + * struct dpdmux_cls_action - Action to execute for frames matching the
18330 + * classification entry
18331 + *
18332 + * @dest_if: Interface to forward the frames to. Port numbering is similar to
18333 + * the one used to connect interfaces:
18334 + * - 0 is the uplink port,
18335 + * - all others are downlink ports.
18336 + */
18337 +struct dpdmux_cls_action {
18338 + u16 dest_if;
18339 +};
18340 +
18341 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
18342 + u32 cmd_flags,
18343 + u16 token,
18344 + struct dpdmux_rule_cfg *rule,
18345 + struct dpdmux_cls_action *action);
18346 +
18347 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
18348 + u32 cmd_flags,
18349 + u16 token,
18350 + struct dpdmux_rule_cfg *rule);
18351 +
18352 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
18353 + u32 cmd_flags,
18354 + u16 *major_ver,
18355 + u16 *minor_ver);
18356 +
18357 +#endif /* __FSL_DPDMUX_H */
18358 --- /dev/null
18359 +++ b/drivers/staging/fsl-dpaa2/evb/evb.c
18360 @@ -0,0 +1,1350 @@
18361 +/* Copyright 2015 Freescale Semiconductor Inc.
18362 + *
18363 + * Redistribution and use in source and binary forms, with or without
18364 + * modification, are permitted provided that the following conditions are met:
18365 + * * Redistributions of source code must retain the above copyright
18366 + * notice, this list of conditions and the following disclaimer.
18367 + * * Redistributions in binary form must reproduce the above copyright
18368 + * notice, this list of conditions and the following disclaimer in the
18369 + * documentation and/or other materials provided with the distribution.
18370 + * * Neither the name of Freescale Semiconductor nor the
18371 + * names of its contributors may be used to endorse or promote products
18372 + * derived from this software without specific prior written permission.
18373 + *
18374 + *
18375 + * ALTERNATIVELY, this software may be distributed under the terms of the
18376 + * GNU General Public License ("GPL") as published by the Free Software
18377 + * Foundation, either version 2 of that License or (at your option) any
18378 + * later version.
18379 + *
18380 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18381 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18382 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18383 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
18384 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18385 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
18386 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
18387 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
18388 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
18389 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18390 + */
18391 +#include <linux/module.h>
18392 +#include <linux/msi.h>
18393 +#include <linux/netdevice.h>
18394 +#include <linux/etherdevice.h>
18395 +#include <linux/rtnetlink.h>
18396 +#include <linux/if_vlan.h>
18397 +
18398 +#include <uapi/linux/if_bridge.h>
18399 +#include <net/netlink.h>
18400 +
18401 +#include "../../fsl-mc/include/mc.h"
18402 +
18403 +#include "dpdmux.h"
18404 +#include "dpdmux-cmd.h"
18405 +
18406 +static const char evb_drv_version[] = "0.1";
18407 +
18408 +/* Minimal supported DPDMUX version */
18409 +#define DPDMUX_MIN_VER_MAJOR 6
18410 +#define DPDMUX_MIN_VER_MINOR 0
18411 +
18412 +/* IRQ index */
18413 +#define DPDMUX_MAX_IRQ_NUM 2
18414 +
18415 +/* MAX FRAME LENGTH (currently 10k) */
18416 +#define EVB_MAX_FRAME_LENGTH (10 * 1024)
18417 +/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
18418 +#define EVB_MIN_FRAME_LENGTH 68
18419 +
18420 +struct evb_port_priv {
18421 + struct net_device *netdev;
18422 + struct list_head list;
18423 + u16 port_index;
18424 + struct evb_priv *evb_priv;
18425 + u8 vlans[VLAN_VID_MASK + 1];
18426 +};
18427 +
18428 +struct evb_priv {
18429 + /* keep first */
18430 + struct evb_port_priv uplink;
18431 +
18432 + struct fsl_mc_io *mc_io;
18433 + struct list_head port_list;
18434 + struct dpdmux_attr attr;
18435 + u16 mux_handle;
18436 + int dev_id;
18437 +};
18438 +
18439 +static int _evb_port_carrier_state_sync(struct net_device *netdev)
18440 +{
18441 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18442 + struct dpdmux_link_state state;
18443 + int err;
18444 +
18445 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
18446 + port_priv->evb_priv->mux_handle,
18447 + port_priv->port_index, &state);
18448 + if (unlikely(err)) {
18449 + netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
18450 + return err;
18451 + }
18452 +
18453 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
18454 +
18455 + if (state.up)
18456 + netif_carrier_on(port_priv->netdev);
18457 + else
18458 + netif_carrier_off(port_priv->netdev);
18459 +
18460 + return 0;
18461 +}
18462 +
18463 +static int evb_port_open(struct net_device *netdev)
18464 +{
18465 + int err;
18466 +
18467 + /* FIXME: enable port when support added */
18468 +
18469 + err = _evb_port_carrier_state_sync(netdev);
18470 + if (err) {
18471 + netdev_err(netdev, "ethsw_port_carrier_state_sync err %d\n",
18472 + err);
18473 + return err;
18474 + }
18475 +
18476 + return 0;
18477 +}
18478 +
18479 +static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
18480 +{
18481 + /* we don't support I/O for now, drop the frame */
18482 + dev_kfree_skb_any(skb);
18483 + return NETDEV_TX_OK;
18484 +}
18485 +
18486 +static int evb_links_state_update(struct evb_priv *priv)
18487 +{
18488 + struct evb_port_priv *port_priv;
18489 + struct list_head *pos;
18490 + int err;
18491 +
18492 + list_for_each(pos, &priv->port_list) {
18493 + port_priv = list_entry(pos, struct evb_port_priv, list);
18494 +
18495 + err = _evb_port_carrier_state_sync(port_priv->netdev);
18496 + if (err)
18497 + netdev_err(port_priv->netdev,
18498 + "_evb_port_carrier_state_sync err %d\n",
18499 + err);
18500 + }
18501 +
18502 + return 0;
18503 +}
18504 +
18505 +static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
18506 +{
18507 + return IRQ_WAKE_THREAD;
18508 +}
18509 +
18510 +static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
18511 +{
18512 + struct device *dev = (struct device *)arg;
18513 + struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
18514 + struct net_device *netdev = dev_get_drvdata(dev);
18515 + struct evb_priv *priv = netdev_priv(netdev);
18516 + struct fsl_mc_io *io = priv->mc_io;
18517 + u16 token = priv->mux_handle;
18518 + int irq_index = DPDMUX_IRQ_INDEX_IF;
18519 +
18520 + /* Mask the events and the if_id reserved bits to be cleared on read */
18521 + u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
18522 + int err;
18523 +
18524 + /* Sanity check */
18525 + if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
18526 + goto out;
18527 + if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num))
18528 + goto out;
18529 +
18530 + err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
18531 + if (unlikely(err)) {
18532 + netdev_err(netdev, "Can't get irq status (err %d)", err);
18533 + err = dpdmux_clear_irq_status(io, 0, token, irq_index,
18534 + 0xFFFFFFFF);
18535 + if (unlikely(err))
18536 + netdev_err(netdev, "Can't clear irq status (err %d)",
18537 + err);
18538 + goto out;
18539 + }
18540 +
18541 + if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
18542 + err = evb_links_state_update(priv);
18543 + if (unlikely(err))
18544 + goto out;
18545 + }
18546 +
18547 +out:
18548 + return IRQ_HANDLED;
18549 +}
18550 +
18551 +static int evb_setup_irqs(struct fsl_mc_device *evb_dev)
18552 +{
18553 + struct device *dev = &evb_dev->dev;
18554 + struct net_device *netdev = dev_get_drvdata(dev);
18555 + struct evb_priv *priv = netdev_priv(netdev);
18556 + int err = 0;
18557 + struct fsl_mc_device_irq *irq;
18558 + const int irq_index = DPDMUX_IRQ_INDEX_IF;
18559 + u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED;
18560 +
18561 + err = fsl_mc_allocate_irqs(evb_dev);
18562 + if (unlikely(err)) {
18563 + dev_err(dev, "MC irqs allocation failed\n");
18564 + return err;
18565 + }
18566 +
18567 + if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) {
18568 + err = -EINVAL;
18569 + goto free_irq;
18570 + }
18571 +
18572 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18573 + irq_index, 0);
18574 + if (unlikely(err)) {
18575 + dev_err(dev, "dpdmux_set_irq_enable err %d\n", err);
18576 + goto free_irq;
18577 + }
18578 +
18579 + irq = evb_dev->irqs[irq_index];
18580 +
18581 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
18582 + evb_irq0_handler,
18583 + _evb_irq0_handler_thread,
18584 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
18585 + dev_name(dev), dev);
18586 + if (unlikely(err)) {
18587 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
18588 + goto free_irq;
18589 + }
18590 +
18591 + err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle,
18592 + irq_index, mask);
18593 + if (unlikely(err)) {
18594 + dev_err(dev, "dpdmux_set_irq_mask(): %d", err);
18595 + goto free_devm_irq;
18596 + }
18597 +
18598 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18599 + irq_index, 1);
18600 + if (unlikely(err)) {
18601 + dev_err(dev, "dpdmux_set_irq_enable(): %d", err);
18602 + goto free_devm_irq;
18603 + }
18604 +
18605 + return 0;
18606 +
18607 +free_devm_irq:
18608 + devm_free_irq(dev, irq->msi_desc->irq, dev);
18609 +free_irq:
18610 + fsl_mc_free_irqs(evb_dev);
18611 + return err;
18612 +}
18613 +
18614 +static void evb_teardown_irqs(struct fsl_mc_device *evb_dev)
18615 +{
18616 + struct device *dev = &evb_dev->dev;
18617 + struct net_device *netdev = dev_get_drvdata(dev);
18618 + struct evb_priv *priv = netdev_priv(netdev);
18619 +
18620 + dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18621 + DPDMUX_IRQ_INDEX_IF, 0);
18622 +
18623 + devm_free_irq(dev,
18624 + evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq,
18625 + dev);
18626 + fsl_mc_free_irqs(evb_dev);
18627 +}
18628 +
18629 +static int evb_port_add_rule(struct net_device *netdev,
18630 + const unsigned char *addr, u16 vid)
18631 +{
18632 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18633 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
18634 + int err;
18635 +
18636 + if (addr)
18637 + ether_addr_copy(rule.mac_addr, addr);
18638 +
18639 + err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io,
18640 + 0,
18641 + port_priv->evb_priv->mux_handle,
18642 + port_priv->port_index, &rule);
18643 + if (unlikely(err))
18644 + netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err);
18645 + return err;
18646 +}
18647 +
18648 +static int evb_port_del_rule(struct net_device *netdev,
18649 + const unsigned char *addr, u16 vid)
18650 +{
18651 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18652 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
18653 + int err;
18654 +
18655 + if (addr)
18656 + ether_addr_copy(rule.mac_addr, addr);
18657 +
18658 + err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io,
18659 + 0,
18660 + port_priv->evb_priv->mux_handle,
18661 + port_priv->port_index, &rule);
18662 + if (unlikely(err))
18663 + netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err);
18664 + return err;
18665 +}
18666 +
18667 +static bool _lookup_address(struct net_device *netdev,
18668 + const unsigned char *addr)
18669 +{
18670 + struct netdev_hw_addr *ha;
18671 + struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ?
18672 + &netdev->uc : &netdev->mc;
18673 +
18674 + netif_addr_lock_bh(netdev);
18675 + list_for_each_entry(ha, &list->list, list) {
18676 + if (ether_addr_equal(ha->addr, addr)) {
18677 + netif_addr_unlock_bh(netdev);
18678 + return true;
18679 + }
18680 + }
18681 + netif_addr_unlock_bh(netdev);
18682 + return false;
18683 +}
18684 +
18685 +static inline int evb_port_fdb_prep(struct nlattr *tb[],
18686 + struct net_device *netdev,
18687 + const unsigned char *addr, u16 *vid,
18688 + bool del)
18689 +{
18690 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18691 + struct evb_priv *evb_priv = port_priv->evb_priv;
18692 +
18693 + *vid = 0;
18694 +
18695 + if (evb_priv->attr.method != DPDMUX_METHOD_MAC &&
18696 + evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
18697 + netdev_err(netdev,
18698 + "EVB mode does not support MAC classification\n");
18699 + return -EOPNOTSUPP;
18700 + }
18701 +
18702 + /* check if the address is configured on this port */
18703 + if (_lookup_address(netdev, addr)) {
18704 + if (!del)
18705 + return -EEXIST;
18706 + } else {
18707 + if (del)
18708 + return -ENOENT;
18709 + }
18710 +
18711 + if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
18712 + if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
18713 + netdev_err(netdev, "invalid vlan size %d\n",
18714 + nla_len(tb[NDA_VLAN]));
18715 + return -EINVAL;
18716 + }
18717 +
18718 + *vid = nla_get_u16(tb[NDA_VLAN]);
18719 +
18720 + if (!*vid || *vid >= VLAN_VID_MASK) {
18721 + netdev_err(netdev, "invalid vid value 0x%04x\n", *vid);
18722 + return -EINVAL;
18723 + }
18724 + } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
18725 + netdev_err(netdev,
18726 + "EVB mode requires explicit VLAN configuration\n");
18727 + return -EINVAL;
18728 + } else if (tb[NDA_VLAN]) {
18729 + netdev_warn(netdev, "VLAN not supported, argument ignored\n");
18730 + }
18731 +
18732 + return 0;
18733 +}
18734 +
18735 +static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
18736 + struct net_device *netdev,
18737 + const unsigned char *addr, u16 vid, u16 flags)
18738 +{
18739 + u16 _vid;
18740 + int err;
18741 +
18742 + /* TODO: add replace support when added to iproute bridge */
18743 + if (!(flags & NLM_F_REQUEST)) {
18744 + netdev_err(netdev,
18745 + "evb_port_fdb_add unexpected flags value %08x\n",
18746 + flags);
18747 + return -EINVAL;
18748 + }
18749 +
18750 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0);
18751 + if (unlikely(err))
18752 + return err;
18753 +
18754 + err = evb_port_add_rule(netdev, addr, _vid);
18755 + if (unlikely(err))
18756 + return err;
18757 +
18758 + if (is_unicast_ether_addr(addr)) {
18759 + err = dev_uc_add(netdev, addr);
18760 + if (unlikely(err)) {
18761 + netdev_err(netdev, "dev_uc_add err %d\n", err);
18762 + return err;
18763 + }
18764 + } else {
18765 + err = dev_mc_add(netdev, addr);
18766 + if (unlikely(err)) {
18767 + netdev_err(netdev, "dev_mc_add err %d\n", err);
18768 + return err;
18769 + }
18770 + }
18771 +
18772 + return 0;
18773 +}
18774 +
18775 +static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
18776 + struct net_device *netdev,
18777 + const unsigned char *addr, u16 vid)
18778 +{
18779 + u16 _vid;
18780 + int err;
18781 +
18782 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1);
18783 + if (unlikely(err))
18784 + return err;
18785 +
18786 + err = evb_port_del_rule(netdev, addr, _vid);
18787 + if (unlikely(err))
18788 + return err;
18789 +
18790 + if (is_unicast_ether_addr(addr)) {
18791 + err = dev_uc_del(netdev, addr);
18792 + if (unlikely(err)) {
18793 + netdev_err(netdev, "dev_uc_del err %d\n", err);
18794 + return err;
18795 + }
18796 + } else {
18797 + err = dev_mc_del(netdev, addr);
18798 + if (unlikely(err)) {
18799 + netdev_err(netdev, "dev_mc_del err %d\n", err);
18800 + return err;
18801 + }
18802 + }
18803 +
18804 + return 0;
18805 +}
18806 +
18807 +static int evb_change_mtu(struct net_device *netdev,
18808 + int mtu)
18809 +{
18810 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18811 + struct evb_priv *evb_priv = port_priv->evb_priv;
18812 + struct list_head *pos;
18813 + int err = 0;
18814 +
18815 + /* This operation is not permitted on downlinks */
18816 + if (port_priv->port_index > 0)
18817 + return -EPERM;
18818 +
18819 + if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
18820 + netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
18821 + mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
18822 + return -EINVAL;
18823 + }
18824 +
18825 + err = dpdmux_set_max_frame_length(evb_priv->mc_io,
18826 + 0,
18827 + evb_priv->mux_handle,
18828 + (uint16_t)mtu);
18829 +
18830 + if (unlikely(err)) {
18831 + netdev_err(netdev, "dpdmux_set_max_frame_length err %d\n",
18832 + err);
18833 + return err;
18834 + }
18835 +
18836 + /* Update the max frame length for downlinks */
18837 + list_for_each(pos, &evb_priv->port_list) {
18838 + port_priv = list_entry(pos, struct evb_port_priv, list);
18839 + port_priv->netdev->mtu = mtu;
18840 + }
18841 +
18842 + netdev->mtu = mtu;
18843 + return 0;
18844 +}
18845 +
18846 +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
18847 + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
18848 + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
18849 + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
18850 + .len = sizeof(struct bridge_vlan_info), },
18851 +};
18852 +
18853 +static int evb_setlink_af_spec(struct net_device *netdev,
18854 + struct nlattr **tb)
18855 +{
18856 + struct bridge_vlan_info *vinfo;
18857 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18858 + int err = 0;
18859 +
18860 + if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
18861 + netdev_err(netdev, "no VLAN INFO in nlmsg\n");
18862 + return -EOPNOTSUPP;
18863 + }
18864 +
18865 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
18866 +
18867 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
18868 + return -EINVAL;
18869 +
18870 + err = evb_port_add_rule(netdev, NULL, vinfo->vid);
18871 + if (unlikely(err))
18872 + return err;
18873 +
18874 + port_priv->vlans[vinfo->vid] = 1;
18875 +
18876 + return 0;
18877 +}
18878 +
18879 +static int evb_setlink(struct net_device *netdev,
18880 + struct nlmsghdr *nlh,
18881 + u16 flags)
18882 +{
18883 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18884 + struct evb_priv *evb_priv = port_priv->evb_priv;
18885 + struct nlattr *attr;
18886 + struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
18887 + IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
18888 + int err = 0;
18889 +
18890 + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
18891 + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
18892 + netdev_err(netdev,
18893 + "EVB mode does not support VLAN only classification\n");
18894 + return -EOPNOTSUPP;
18895 + }
18896 +
18897 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
18898 + if (attr) {
18899 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
18900 + ifla_br_policy);
18901 + if (unlikely(err)) {
18902 + netdev_err(netdev,
18903 + "nla_parse_nested for br_policy err %d\n",
18904 + err);
18905 + return err;
18906 + }
18907 +
18908 + err = evb_setlink_af_spec(netdev, tb);
18909 + return err;
18910 + }
18911 +
18912 + netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
18913 + return -EOPNOTSUPP;
18914 +}
18915 +
18916 +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
18917 +{
18918 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18919 + struct evb_priv *evb_priv = port_priv->evb_priv;
18920 + u8 operstate = netif_running(netdev) ?
18921 + netdev->operstate : IF_OPER_DOWN;
18922 + int iflink;
18923 + int err;
18924 +
18925 + err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
18926 + if (unlikely(err))
18927 + goto nla_put_err;
18928 + err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex);
18929 + if (unlikely(err))
18930 + goto nla_put_err;
18931 + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
18932 + if (unlikely(err))
18933 + goto nla_put_err;
18934 + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
18935 + if (unlikely(err))
18936 + goto nla_put_err;
18937 + if (netdev->addr_len) {
18938 + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
18939 + netdev->dev_addr);
18940 + if (unlikely(err))
18941 + goto nla_put_err;
18942 + }
18943 +
18944 + iflink = dev_get_iflink(netdev);
18945 + if (netdev->ifindex != iflink) {
18946 + err = nla_put_u32(skb, IFLA_LINK, iflink);
18947 + if (unlikely(err))
18948 + goto nla_put_err;
18949 + }
18950 +
18951 + return 0;
18952 +
18953 +nla_put_err:
18954 + netdev_err(netdev, "nla_put_ err %d\n", err);
18955 + return err;
18956 +}
18957 +
18958 +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev)
18959 +{
18960 + struct nlattr *nest;
18961 + int err;
18962 +
18963 + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
18964 + if (!nest) {
18965 + netdev_err(netdev, "nla_nest_start failed\n");
18966 + return -ENOMEM;
18967 + }
18968 +
18969 + err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING);
18970 + if (unlikely(err))
18971 + goto nla_put_err;
18972 + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
18973 + if (unlikely(err))
18974 + goto nla_put_err;
18975 + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
18976 + if (unlikely(err))
18977 + goto nla_put_err;
18978 + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
18979 + if (unlikely(err))
18980 + goto nla_put_err;
18981 + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
18982 + if (unlikely(err))
18983 + goto nla_put_err;
18984 + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
18985 + if (unlikely(err))
18986 + goto nla_put_err;
18987 + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
18988 + if (unlikely(err))
18989 + goto nla_put_err;
18990 + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0);
18991 + if (unlikely(err))
18992 + goto nla_put_err;
18993 + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1);
18994 + if (unlikely(err))
18995 + goto nla_put_err;
18996 + nla_nest_end(skb, nest);
18997 +
18998 + return 0;
18999 +
19000 +nla_put_err:
19001 + netdev_err(netdev, "nla_put_ err %d\n", err);
19002 + nla_nest_cancel(skb, nest);
19003 + return err;
19004 +}
19005 +
19006 +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev)
19007 +{
19008 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19009 + struct nlattr *nest;
19010 + struct bridge_vlan_info vinfo;
19011 + const u8 *vlans = port_priv->vlans;
19012 + u16 i;
19013 + int err;
19014 +
19015 + nest = nla_nest_start(skb, IFLA_AF_SPEC);
19016 + if (!nest) {
19017 + netdev_err(netdev, "nla_nest_start failed");
19018 + return -ENOMEM;
19019 + }
19020 +
19021 + for (i = 0; i < VLAN_VID_MASK + 1; i++) {
19022 + if (!vlans[i])
19023 + continue;
19024 +
19025 + vinfo.flags = 0;
19026 + vinfo.vid = i;
19027 +
19028 + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
19029 + sizeof(vinfo), &vinfo);
19030 + if (unlikely(err))
19031 + goto nla_put_err;
19032 + }
19033 +
19034 + nla_nest_end(skb, nest);
19035 +
19036 + return 0;
19037 +
19038 +nla_put_err:
19039 + netdev_err(netdev, "nla_put_ err %d\n", err);
19040 + nla_nest_cancel(skb, nest);
19041 + return err;
19042 +}
19043 +
19044 +static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq,
19045 + struct net_device *netdev, u32 filter_mask, int nlflags)
19046 +{
19047 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19048 + struct evb_priv *evb_priv = port_priv->evb_priv;
19049 + struct ifinfomsg *hdr;
19050 + struct nlmsghdr *nlh;
19051 + int err;
19052 +
19053 + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
19054 + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
19055 + return 0;
19056 + }
19057 +
19058 + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
19059 + if (!nlh)
19060 + return -EMSGSIZE;
19061 +
19062 + hdr = nlmsg_data(nlh);
19063 + memset(hdr, 0, sizeof(*hdr));
19064 + hdr->ifi_family = AF_BRIDGE;
19065 + hdr->ifi_type = netdev->type;
19066 + hdr->ifi_index = netdev->ifindex;
19067 + hdr->ifi_flags = dev_get_flags(netdev);
19068 +
19069 + err = __nla_put_netdev(skb, netdev);
19070 + if (unlikely(err))
19071 + goto nla_put_err;
19072 +
19073 + err = __nla_put_port(skb, netdev);
19074 + if (unlikely(err))
19075 + goto nla_put_err;
19076 +
19077 + /* Check if the VID information is requested */
19078 + if (filter_mask & RTEXT_FILTER_BRVLAN) {
19079 + err = __nla_put_vlan(skb, netdev);
19080 + if (unlikely(err))
19081 + goto nla_put_err;
19082 + }
19083 +
19084 + nlmsg_end(skb, nlh);
19085 + return skb->len;
19086 +
19087 +nla_put_err:
19088 + nlmsg_cancel(skb, nlh);
19089 + return -EMSGSIZE;
19090 +}
19091 +
19092 +static int evb_dellink(struct net_device *netdev,
19093 + struct nlmsghdr *nlh,
19094 + u16 flags)
19095 +{
19096 + struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
19097 + struct nlattr *spec;
19098 + struct bridge_vlan_info *vinfo;
19099 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19100 + int err = 0;
19101 +
19102 + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
19103 + if (!spec)
19104 + return 0;
19105 +
19106 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
19107 + if (unlikely(err))
19108 + return err;
19109 +
19110 + if (!tb[IFLA_BRIDGE_VLAN_INFO])
19111 + return -EOPNOTSUPP;
19112 +
19113 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
19114 +
19115 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
19116 + return -EINVAL;
19117 +
19118 + err = evb_port_del_rule(netdev, NULL, vinfo->vid);
19119 + if (unlikely(err)) {
19120 + netdev_err(netdev, "evb_port_del_rule err %d\n", err);
19121 + return err;
19122 + }
19123 + port_priv->vlans[vinfo->vid] = 0;
19124 +
19125 + return 0;
19126 +}
19127 +
19128 +void evb_port_get_stats(struct net_device *netdev,
19129 + struct rtnl_link_stats64 *storage)
19130 +{
19131 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19132 + u64 tmp;
19133 + int err;
19134 +
19135 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19136 + 0,
19137 + port_priv->evb_priv->mux_handle,
19138 + port_priv->port_index,
19139 + DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
19140 + if (unlikely(err))
19141 + goto error;
19142 +
19143 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19144 + 0,
19145 + port_priv->evb_priv->mux_handle,
19146 + port_priv->port_index,
19147 + DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
19148 + if (unlikely(err))
19149 + goto error;
19150 +
19151 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19152 + 0,
19153 + port_priv->evb_priv->mux_handle,
19154 + port_priv->port_index,
19155 + DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
19156 + if (unlikely(err))
19157 + goto error;
19158 +
19159 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19160 + 0,
19161 + port_priv->evb_priv->mux_handle,
19162 + port_priv->port_index,
19163 + DPDMUX_CNT_ING_FRAME_DISCARD,
19164 + &storage->rx_dropped);
19165 + if (unlikely(err)) {
19166 + storage->rx_dropped = tmp;
19167 + goto error;
19168 + }
19169 + storage->rx_dropped += tmp;
19170 +
19171 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19172 + 0,
19173 + port_priv->evb_priv->mux_handle,
19174 + port_priv->port_index,
19175 + DPDMUX_CNT_ING_MCAST_FRAME,
19176 + &storage->multicast);
19177 + if (unlikely(err))
19178 + goto error;
19179 +
19180 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19181 + 0,
19182 + port_priv->evb_priv->mux_handle,
19183 + port_priv->port_index,
19184 + DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
19185 + if (unlikely(err))
19186 + goto error;
19187 +
19188 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19189 + 0,
19190 + port_priv->evb_priv->mux_handle,
19191 + port_priv->port_index,
19192 + DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
19193 + if (unlikely(err))
19194 + goto error;
19195 +
19196 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19197 + 0,
19198 + port_priv->evb_priv->mux_handle,
19199 + port_priv->port_index,
19200 + DPDMUX_CNT_EGR_FRAME_DISCARD,
19201 + &storage->tx_dropped);
19202 + if (unlikely(err))
19203 + goto error;
19204 +
19205 + return;
19206 +
19207 +error:
19208 + netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
19209 +}
19210 +
19211 +static const struct net_device_ops evb_port_ops = {
19212 + .ndo_open = &evb_port_open,
19213 +
19214 + .ndo_start_xmit = &evb_dropframe,
19215 +
19216 + .ndo_fdb_add = &evb_port_fdb_add,
19217 + .ndo_fdb_del = &evb_port_fdb_del,
19218 +
19219 + .ndo_get_stats64 = &evb_port_get_stats,
19220 + .ndo_change_mtu = &evb_change_mtu,
19221 +};
19222 +
19223 +static void evb_get_drvinfo(struct net_device *netdev,
19224 + struct ethtool_drvinfo *drvinfo)
19225 +{
19226 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19227 + u16 version_major, version_minor;
19228 + int err;
19229 +
19230 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
19231 + strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version));
19232 +
19233 + err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0,
19234 + &version_major,
19235 + &version_minor);
19236 + if (err)
19237 + strlcpy(drvinfo->fw_version, "N/A",
19238 + sizeof(drvinfo->fw_version));
19239 + else
19240 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
19241 + "%u.%u", version_major, version_minor);
19242 +
19243 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
19244 + sizeof(drvinfo->bus_info));
19245 +}
19246 +
19247 +static int evb_get_settings(struct net_device *netdev,
19248 + struct ethtool_cmd *cmd)
19249 +{
19250 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19251 + struct dpdmux_link_state state = {0};
19252 + int err = 0;
19253 +
19254 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19255 + port_priv->evb_priv->mux_handle,
19256 + port_priv->port_index,
19257 + &state);
19258 + if (err) {
19259 + netdev_err(netdev, "ERROR %d getting link state", err);
19260 + goto out;
19261 + }
19262 +
19263 + /* At the moment we have no way of interrogating the DPMAC
19264 + * from the DPDMUX side, and a DPMAC may not even exist.
19265 + * Report only the autoneg state, duplex and speed.
19266 + */
19267 + if (state.options & DPDMUX_LINK_OPT_AUTONEG)
19268 + cmd->autoneg = AUTONEG_ENABLE;
19269 + if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX))
19270 + cmd->duplex = DUPLEX_FULL;
19271 + ethtool_cmd_speed_set(cmd, state.rate);
19272 +
19273 +out:
19274 + return err;
19275 +}
19276 +
19277 +static int evb_set_settings(struct net_device *netdev,
19278 + struct ethtool_cmd *cmd)
19279 +{
19280 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19281 + struct dpdmux_link_state state = {0};
19282 + struct dpdmux_link_cfg cfg = {0};
19283 + int err = 0;
19284 +
19285 + netdev_dbg(netdev, "Setting link parameters...");
19286 +
19287 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19288 + port_priv->evb_priv->mux_handle,
19289 + port_priv->port_index,
19290 + &state);
19291 + if (err) {
19292 + netdev_err(netdev, "ERROR %d getting link state", err);
19293 + goto out;
19294 + }
19295 +
19296 + /* Due to a temporary MC limitation, the DPDMUX port must be down
19297 + * in order to change link settings, so let the user know about
19298 + * this restriction.
19299 + */
19300 + if (netif_running(netdev)) {
19301 + netdev_info(netdev,
19302 + "Sorry, interface must be brought down first.\n");
19303 + return -EACCES;
19304 + }
19305 +
19306 + cfg.options = state.options;
19307 + cfg.rate = ethtool_cmd_speed(cmd);
19308 + if (cmd->autoneg == AUTONEG_ENABLE)
19309 + cfg.options |= DPDMUX_LINK_OPT_AUTONEG;
19310 + else
19311 + cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG;
19312 + if (cmd->duplex == DUPLEX_HALF)
19313 + cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX;
19314 + else
19315 + cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX;
19316 +
19317 + err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0,
19318 + port_priv->evb_priv->mux_handle,
19319 + port_priv->port_index,
19320 + &cfg);
19321 + if (err)
19322 + /* ethtool will be loud enough if we return an error; no point
19323 + * in putting our own error message on the console by default
19324 + */
19325 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
19326 +
19327 +out:
19328 + return err;
19329 +}
19330 +
19331 +static struct {
19332 + enum dpdmux_counter_type id;
19333 + char name[ETH_GSTRING_LEN];
19334 +} evb_ethtool_counters[] = {
19335 + {DPDMUX_CNT_ING_FRAME, "rx frames"},
19336 + {DPDMUX_CNT_ING_BYTE, "rx bytes"},
19337 + {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"},
19338 + {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
19339 + {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
19340 + {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
19341 + {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
19342 + {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
19343 + {DPDMUX_CNT_EGR_FRAME, "tx frames"},
19344 + {DPDMUX_CNT_EGR_BYTE, "tx bytes"},
19345 + {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
19346 +};
19347 +
19348 +static int evb_ethtool_get_sset_count(struct net_device *dev, int sset)
19349 +{
19350 + switch (sset) {
19351 + case ETH_SS_STATS:
19352 + return ARRAY_SIZE(evb_ethtool_counters);
19353 + default:
19354 + return -EOPNOTSUPP;
19355 + }
19356 +}
19357 +
19358 +static void evb_ethtool_get_strings(struct net_device *netdev,
19359 + u32 stringset, u8 *data)
19360 +{
19361 + u32 i;
19362 +
19363 + switch (stringset) {
19364 + case ETH_SS_STATS:
19365 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++)
19366 + memcpy(data + i * ETH_GSTRING_LEN,
19367 + evb_ethtool_counters[i].name, ETH_GSTRING_LEN);
19368 + break;
19369 + }
19370 +}
19371 +
19372 +static void evb_ethtool_get_stats(struct net_device *netdev,
19373 + struct ethtool_stats *stats,
19374 + u64 *data)
19375 +{
19376 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19377 + u32 i;
19378 + int err;
19379 +
19380 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) {
19381 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19382 + 0,
19383 + port_priv->evb_priv->mux_handle,
19384 + port_priv->port_index,
19385 + evb_ethtool_counters[i].id,
19386 + &data[i]);
19387 + if (err)
19388 + netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n",
19389 + evb_ethtool_counters[i].name, err);
19390 + }
19391 +}
19392 +
19393 +static const struct ethtool_ops evb_port_ethtool_ops = {
19394 + .get_drvinfo = &evb_get_drvinfo,
19395 + .get_link = &ethtool_op_get_link,
19396 + .get_settings = &evb_get_settings,
19397 + .set_settings = &evb_set_settings,
19398 + .get_strings = &evb_ethtool_get_strings,
19399 + .get_ethtool_stats = &evb_ethtool_get_stats,
19400 + .get_sset_count = &evb_ethtool_get_sset_count,
19401 +};
19402 +
19403 +static int evb_open(struct net_device *netdev)
19404 +{
19405 + struct evb_priv *priv = netdev_priv(netdev);
19406 + int err = 0;
19407 +
19408 + err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle);
19409 + if (unlikely(err))
19410 + netdev_err(netdev, "dpdmux_enable err %d\n", err);
19411 +
19412 + return err;
19413 +}
19414 +
19415 +static int evb_close(struct net_device *netdev)
19416 +{
19417 + struct evb_priv *priv = netdev_priv(netdev);
19418 + int err = 0;
19419 +
19420 + err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle);
19421 + if (unlikely(err))
19422 + netdev_err(netdev, "dpdmux_disable err %d\n", err);
19423 +
19424 + return err;
19425 +}
19426 +
19427 +static const struct net_device_ops evb_ops = {
19428 + .ndo_start_xmit = &evb_dropframe,
19429 + .ndo_open = &evb_open,
19430 + .ndo_stop = &evb_close,
19431 +
19432 + .ndo_bridge_setlink = &evb_setlink,
19433 + .ndo_bridge_getlink = &evb_getlink,
19434 + .ndo_bridge_dellink = &evb_dellink,
19435 +
19436 + .ndo_get_stats64 = &evb_port_get_stats,
19437 + .ndo_change_mtu = &evb_change_mtu,
19438 +};
19439 +
19440 +static int evb_takedown(struct fsl_mc_device *evb_dev)
19441 +{
19442 + struct device *dev = &evb_dev->dev;
19443 + struct net_device *netdev = dev_get_drvdata(dev);
19444 + struct evb_priv *priv = netdev_priv(netdev);
19445 + int err;
19446 +
19447 + err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
19448 + if (unlikely(err))
19449 + dev_warn(dev, "dpdmux_close err %d\n", err);
19450 +
19451 + return 0;
19452 +}
19453 +
19454 +static int evb_init(struct fsl_mc_device *evb_dev)
19455 +{
19456 + struct device *dev = &evb_dev->dev;
19457 + struct net_device *netdev = dev_get_drvdata(dev);
19458 + struct evb_priv *priv = netdev_priv(netdev);
19459 + u16 version_major;
19460 + u16 version_minor;
19461 + int err = 0;
19462 +
19463 + priv->dev_id = evb_dev->obj_desc.id;
19464 +
19465 + err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
19466 + if (unlikely(err)) {
19467 + dev_err(dev, "dpdmux_open err %d\n", err);
19468 + goto err_exit;
19469 + }
19470 + if (!priv->mux_handle) {
19471 + dev_err(dev, "dpdmux_open returned null handle but no error\n");
19472 + err = -EFAULT;
19473 + goto err_exit;
19474 + }
19475 +
19476 + err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
19477 + &priv->attr);
19478 + if (unlikely(err)) {
19479 + dev_err(dev, "dpdmux_get_attributes err %d\n", err);
19480 + goto err_close;
19481 + }
19482 +
19483 + err = dpdmux_get_api_version(priv->mc_io, 0,
19484 + &version_major,
19485 + &version_minor);
19486 + if (unlikely(err)) {
19487 + dev_err(dev, "dpdmux_get_api_version err %d\n", err);
19488 + goto err_close;
19489 + }
19490 +
19491 + /* Minimum supported DPDMUX version check */
19492 + if (version_major < DPDMUX_MIN_VER_MAJOR ||
19493 + (version_major == DPDMUX_MIN_VER_MAJOR &&
19494 + version_minor < DPDMUX_MIN_VER_MINOR)) {
19495 + dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
19496 + version_major, version_minor,
19497 + DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
19498 + err = -ENOTSUPP;
19499 + goto err_close;
19500 + }
19501 +
19502 + err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
19503 + if (unlikely(err)) {
19504 + dev_err(dev, "dpdmux_reset err %d\n", err);
19505 + goto err_close;
19506 + }
19507 +
19508 + return 0;
19509 +
19510 +err_close:
19511 + dpdmux_close(priv->mc_io, 0, priv->mux_handle);
19512 +err_exit:
19513 + return err;
19514 +}
19515 +
19516 +static int evb_remove(struct fsl_mc_device *evb_dev)
19517 +{
19518 + struct device *dev = &evb_dev->dev;
19519 + struct net_device *netdev = dev_get_drvdata(dev);
19520 + struct evb_priv *priv = netdev_priv(netdev);
19521 + struct evb_port_priv *port_priv;
19522 + struct list_head *pos;
19523 +
19524 + list_for_each(pos, &priv->port_list) {
19525 + port_priv = list_entry(pos, struct evb_port_priv, list);
19526 +
19527 + rtnl_lock();
19528 + netdev_upper_dev_unlink(port_priv->netdev, netdev);
19529 + rtnl_unlock();
19530 +
19531 + unregister_netdev(port_priv->netdev);
19532 + free_netdev(port_priv->netdev);
19533 + }
19534 +
19535 + evb_teardown_irqs(evb_dev);
19536 +
19537 + unregister_netdev(netdev);
19538 +
19539 + evb_takedown(evb_dev);
19540 + fsl_mc_portal_free(priv->mc_io);
19541 +
19542 + dev_set_drvdata(dev, NULL);
19543 + free_netdev(netdev);
19544 +
19545 + return 0;
19546 +}
19547 +
19548 +static int evb_probe(struct fsl_mc_device *evb_dev)
19549 +{
19550 + struct device *dev;
19551 + struct evb_priv *priv = NULL;
19552 + struct net_device *netdev = NULL;
19553 + char port_name[IFNAMSIZ];
19554 + int i;
19555 + int err = 0;
19556 +
19557 + dev = &evb_dev->dev;
19558 +
19559 + /* register switch device, it's for management only - no I/O */
19560 + netdev = alloc_etherdev(sizeof(*priv));
19561 + if (!netdev) {
19562 + dev_err(dev, "alloc_etherdev error\n");
19563 + return -ENOMEM;
19564 + }
19565 + netdev->netdev_ops = &evb_ops;
19566 +
19567 + dev_set_drvdata(dev, netdev);
19568 +
19569 + priv = netdev_priv(netdev);
19570 +
19571 + err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
19572 + if (unlikely(err)) {
19573 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
19574 + goto err_free_netdev;
19575 + }
19576 + if (!priv->mc_io) {
19577 + dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
19578 + err = -EFAULT;
19579 + goto err_free_netdev;
19580 + }
19581 +
19582 + err = evb_init(evb_dev);
19583 + if (unlikely(err)) {
19584 + dev_err(dev, "evb init err %d\n", err);
19585 + goto err_free_cmdport;
19586 + }
19587 +
19588 + INIT_LIST_HEAD(&priv->port_list);
19589 + netdev->flags |= IFF_PROMISC | IFF_MASTER;
19590 +
19591 + dev_alloc_name(netdev, "evb%d");
19592 +
19593 + /* register switch ports */
19594 + snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
19595 +
19596 + /* only register downlinks? */
19597 + for (i = 0; i < priv->attr.num_ifs + 1; i++) {
19598 + struct net_device *port_netdev;
19599 + struct evb_port_priv *port_priv;
19600 +
19601 + if (i) {
19602 + port_netdev =
19603 + alloc_etherdev(sizeof(struct evb_port_priv));
19604 + if (!port_netdev) {
19605 + dev_err(dev, "alloc_etherdev error\n");
19606 + goto err_takedown;
19607 + }
19608 +
19609 + port_priv = netdev_priv(port_netdev);
19610 +
19611 + port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
19612 +
19613 + dev_alloc_name(port_netdev, port_name);
19614 + } else {
19615 + port_netdev = netdev;
19616 + port_priv = &priv->uplink;
19617 + }
19618 +
19619 + port_priv->netdev = port_netdev;
19620 + port_priv->evb_priv = priv;
19621 + port_priv->port_index = i;
19622 +
19623 + SET_NETDEV_DEV(port_netdev, dev);
19624 +
19625 + if (i) {
19626 + port_netdev->netdev_ops = &evb_port_ops;
19627 +
19628 + err = register_netdev(port_netdev);
19629 + if (err < 0) {
19630 + dev_err(dev, "register_netdev err %d\n", err);
19631 + free_netdev(port_netdev);
19632 + goto err_takedown;
19633 + }
19634 +
19635 + rtnl_lock();
19636 + err = netdev_master_upper_dev_link(port_netdev, netdev,
19637 + NULL, NULL);
19638 + if (unlikely(err)) {
19639 + dev_err(dev, "netdev_master_upper_dev_link err %d\n",
19640 + err);
19641 + unregister_netdev(port_netdev);
19642 + free_netdev(port_netdev);
19643 + rtnl_unlock();
19644 + goto err_takedown;
19645 + }
19646 + rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
19647 + IFF_SLAVE, GFP_KERNEL);
19648 + rtnl_unlock();
19649 +
19650 + list_add(&port_priv->list, &priv->port_list);
19651 + } else {
19652 + err = register_netdev(netdev);
19653 +
19654 + if (err < 0) {
19655 + dev_err(dev, "register_netdev error %d\n", err);
19656 + goto err_takedown;
19657 + }
19658 + }
19659 +
19660 + port_netdev->ethtool_ops = &evb_port_ethtool_ops;
19661 +
19662 + /* ports are up from init */
19663 + rtnl_lock();
19664 + err = dev_open(port_netdev);
19665 + rtnl_unlock();
19666 + if (unlikely(err))
19667 + dev_warn(dev, "dev_open err %d\n", err);
19668 + }
19669 +
19670 + /* setup irqs */
19671 + err = evb_setup_irqs(evb_dev);
19672 + if (unlikely(err)) {
19673 + dev_warn(dev, "evb_setup_irqs err %d\n", err);
19674 + goto err_takedown;
19675 + }
19676 +
19677 + dev_info(dev, "probed evb device with %d ports\n",
19678 + priv->attr.num_ifs);
19679 + return 0;
19680 +
19681 +err_takedown:
19682 + evb_remove(evb_dev);
19683 +err_free_cmdport:
19684 + fsl_mc_portal_free(priv->mc_io);
19685 +err_free_netdev:
19686 + return err;
19687 +}
19688 +
19689 +static const struct fsl_mc_device_id evb_match_id_table[] = {
19690 + {
19691 + .vendor = FSL_MC_VENDOR_FREESCALE,
19692 + .obj_type = "dpdmux",
19693 + },
19694 + {}
19695 +};
19696 +
19697 +static struct fsl_mc_driver evb_drv = {
19698 + .driver = {
19699 + .name = KBUILD_MODNAME,
19700 + .owner = THIS_MODULE,
19701 + },
19702 + .probe = evb_probe,
19703 + .remove = evb_remove,
19704 + .match_id_table = evb_match_id_table,
19705 +};
19706 +
19707 +module_fsl_mc_driver(evb_drv);
19708 +
19709 +MODULE_LICENSE("GPL");
19710 +MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
19711 --- /dev/null
19712 +++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
19713 @@ -0,0 +1,23 @@
19714 +config FSL_DPAA2_MAC
19715 + tristate "DPAA2 MAC / PHY interface"
19716 + depends on FSL_MC_BUS && FSL_DPAA2
19717 + select MDIO_BUS_MUX_MMIOREG
19718 + select FSL_XGMAC_MDIO
19719 + select FIXED_PHY
19720 + ---help---
19721 + Prototype driver for DPAA2 MAC / PHY interface object.
19722 + This driver acts as a proxy between phylib (including PHY drivers)
19723 + and the MC firmware. It forwards link state changes reported by
19724 + phylib to the MC, and receives an interrupt from the MC whenever a
19725 + request is made to change the link state.
19726 +
19727 +
19728 +config FSL_DPAA2_MAC_NETDEVS
19729 + bool "Expose net interfaces for PHYs"
19730 + default n
19731 + depends on FSL_DPAA2_MAC
19732 + ---help---
19733 + Exposes macX net interfaces which allow direct control over MACs and
19734 + PHYs.
19735 +
19736 + Leave disabled if unsure.
19737 --- /dev/null
19738 +++ b/drivers/staging/fsl-dpaa2/mac/Makefile
19739 @@ -0,0 +1,10 @@
19740 +
19741 +obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
19742 +
19743 +dpaa2-mac-objs := mac.o dpmac.o
19744 +
19745 +all:
19746 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
19747 +
19748 +clean:
19749 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
19750 --- /dev/null
19751 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
19752 @@ -0,0 +1,172 @@
19753 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
19754 + *
19755 + * Redistribution and use in source and binary forms, with or without
19756 + * modification, are permitted provided that the following conditions are met:
19757 + * * Redistributions of source code must retain the above copyright
19758 + * notice, this list of conditions and the following disclaimer.
19759 + * * Redistributions in binary form must reproduce the above copyright
19760 + * notice, this list of conditions and the following disclaimer in the
19761 + * documentation and/or other materials provided with the distribution.
19762 + * * Neither the name of the above-listed copyright holders nor the
19763 + * names of any contributors may be used to endorse or promote products
19764 + * derived from this software without specific prior written permission.
19765 + *
19766 + *
19767 + * ALTERNATIVELY, this software may be distributed under the terms of the
19768 + * GNU General Public License ("GPL") as published by the Free Software
19769 + * Foundation, either version 2 of that License or (at your option) any
19770 + * later version.
19771 + *
19772 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19773 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19774 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19775 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19776 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19777 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19778 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19779 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19780 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19781 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19782 + * POSSIBILITY OF SUCH DAMAGE.
19783 + */
19784 +#ifndef _FSL_DPMAC_CMD_H
19785 +#define _FSL_DPMAC_CMD_H
19786 +
19787 +/* DPMAC Version */
19788 +#define DPMAC_VER_MAJOR 4
19789 +#define DPMAC_VER_MINOR 2
19790 +#define DPMAC_CMD_BASE_VERSION 1
19791 +#define DPMAC_CMD_ID_OFFSET 4
19792 +
19793 +#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
19794 +
19795 +/* Command IDs */
19796 +#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
19797 +#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
19798 +#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c)
19799 +#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c)
19800 +#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
19801 +
19802 +#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
19803 +#define DPMAC_CMDID_RESET DPMAC_CMD(0x005)
19804 +
19805 +#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012)
19806 +#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013)
19807 +#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014)
19808 +#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015)
19809 +#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016)
19810 +#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017)
19811 +
19812 +#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2)
19813 +#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3)
19814 +#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
19815 +
19816 +#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5)
19817 +
19818 +/* Macros for accessing command fields smaller than 1 byte */
19819 +#define DPMAC_MASK(field) \
19820 + GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
19821 + DPMAC_##field##_SHIFT)
19822 +#define dpmac_set_field(var, field, val) \
19823 + ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
19824 +#define dpmac_get_field(var, field) \
19825 + (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
19826 +
19827 +struct dpmac_cmd_open {
19828 + u32 dpmac_id;
19829 +};
19830 +
19831 +struct dpmac_cmd_create {
19832 + u32 mac_id;
19833 +};
19834 +
19835 +struct dpmac_cmd_destroy {
19836 + u32 dpmac_id;
19837 +};
19838 +
19839 +struct dpmac_cmd_set_irq_enable {
19840 + u8 enable;
19841 + u8 pad[3];
19842 + u8 irq_index;
19843 +};
19844 +
19845 +struct dpmac_cmd_get_irq_enable {
19846 + u32 pad;
19847 + u8 irq_index;
19848 +};
19849 +
19850 +struct dpmac_rsp_get_irq_enable {
19851 + u8 enabled;
19852 +};
19853 +
19854 +struct dpmac_cmd_set_irq_mask {
19855 + u32 mask;
19856 + u8 irq_index;
19857 +};
19858 +
19859 +struct dpmac_cmd_get_irq_mask {
19860 + u32 pad;
19861 + u8 irq_index;
19862 +};
19863 +
19864 +struct dpmac_rsp_get_irq_mask {
19865 + u32 mask;
19866 +};
19867 +
19868 +struct dpmac_cmd_get_irq_status {
19869 + u32 status;
19870 + u8 irq_index;
19871 +};
19872 +
19873 +struct dpmac_rsp_get_irq_status {
19874 + u32 status;
19875 +};
19876 +
19877 +struct dpmac_cmd_clear_irq_status {
19878 + u32 status;
19879 + u8 irq_index;
19880 +};
19881 +
19882 +struct dpmac_rsp_get_attributes {
19883 + u8 eth_if;
19884 + u8 link_type;
19885 + u16 id;
19886 + u32 max_rate;
19887 +};
19888 +
19889 +struct dpmac_rsp_get_link_cfg {
19890 + u64 options;
19891 + u32 rate;
19892 +};
19893 +
19894 +#define DPMAC_STATE_SIZE 1
19895 +#define DPMAC_STATE_SHIFT 0
19896 +
19897 +struct dpmac_cmd_set_link_state {
19898 + u64 options;
19899 + u32 rate;
19900 + u32 pad;
19901 + /* only least significant bit is valid */
19902 + u8 up;
19903 +};
19904 +
19905 +struct dpmac_cmd_get_counter {
19906 + u8 type;
19907 +};
19908 +
19909 +struct dpmac_rsp_get_counter {
19910 + u64 pad;
19911 + u64 counter;
19912 +};
19913 +
19914 +struct dpmac_rsp_get_api_version {
19915 + u16 major;
19916 + u16 minor;
19917 +};
19918 +
19919 +struct dpmac_cmd_set_port_mac_addr {
19920 + u8 pad[2];
19921 + u8 addr[6];
19922 +};
19923 +
19924 +#endif /* _FSL_DPMAC_CMD_H */
19925 --- /dev/null
19926 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
19927 @@ -0,0 +1,620 @@
19928 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
19929 + *
19930 + * Redistribution and use in source and binary forms, with or without
19931 + * modification, are permitted provided that the following conditions are met:
19932 + * * Redistributions of source code must retain the above copyright
19933 + * notice, this list of conditions and the following disclaimer.
19934 + * * Redistributions in binary form must reproduce the above copyright
19935 + * notice, this list of conditions and the following disclaimer in the
19936 + * documentation and/or other materials provided with the distribution.
19937 + * * Neither the name of the above-listed copyright holders nor the
19938 + * names of any contributors may be used to endorse or promote products
19939 + * derived from this software without specific prior written permission.
19940 + *
19941 + *
19942 + * ALTERNATIVELY, this software may be distributed under the terms of the
19943 + * GNU General Public License ("GPL") as published by the Free Software
19944 + * Foundation, either version 2 of that License or (at your option) any
19945 + * later version.
19946 + *
19947 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19948 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19949 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19950 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19951 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19952 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19953 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19954 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19955 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19956 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19957 + * POSSIBILITY OF SUCH DAMAGE.
19958 + */
19959 +#include "../../fsl-mc/include/mc-sys.h"
19960 +#include "../../fsl-mc/include/mc-cmd.h"
19961 +#include "dpmac.h"
19962 +#include "dpmac-cmd.h"
19963 +
19964 +/**
19965 + * dpmac_open() - Open a control session for the specified object.
19966 + * @mc_io: Pointer to MC portal's I/O object
19967 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
19968 + * @dpmac_id: DPMAC unique ID
19969 + * @token: Returned token; use in subsequent API calls
19970 + *
19971 + * This function can be used to open a control session for an
19972 + * already created object; an object may have been declared in
19973 + * the DPL or by calling the dpmac_create function.
19974 + * This function returns a unique authentication token,
19975 + * associated with the specific object ID and the specific MC
19976 + * portal; this token must be used in all subsequent commands for
19977 + * this specific object
19978 + *
19979 + * Return: '0' on Success; Error code otherwise.
19980 + */
19981 +int dpmac_open(struct fsl_mc_io *mc_io,
19982 + u32 cmd_flags,
19983 + int dpmac_id,
19984 + u16 *token)
19985 +{
19986 + struct dpmac_cmd_open *cmd_params;
19987 + struct mc_command cmd = { 0 };
19988 + int err;
19989 +
19990 + /* prepare command */
19991 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
19992 + cmd_flags,
19993 + 0);
19994 + cmd_params = (struct dpmac_cmd_open *)cmd.params;
19995 + cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
19996 +
19997 + /* send command to mc*/
19998 + err = mc_send_command(mc_io, &cmd);
19999 + if (err)
20000 + return err;
20001 +
20002 + /* retrieve response parameters */
20003 + *token = mc_cmd_hdr_read_token(&cmd);
20004 +
20005 + return err;
20006 +}
20007 +
20008 +/**
20009 + * dpmac_close() - Close the control session of the object
20010 + * @mc_io: Pointer to MC portal's I/O object
20011 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20012 + * @token: Token of DPMAC object
20013 + *
20014 + * After this function is called, no further operations are
20015 + * allowed on the object without opening a new control session.
20016 + *
20017 + * Return: '0' on Success; Error code otherwise.
20018 + */
20019 +int dpmac_close(struct fsl_mc_io *mc_io,
20020 + u32 cmd_flags,
20021 + u16 token)
20022 +{
20023 + struct mc_command cmd = { 0 };
20024 +
20025 + /* prepare command */
20026 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
20027 + token);
20028 +
20029 + /* send command to mc*/
20030 + return mc_send_command(mc_io, &cmd);
20031 +}
20032 +
20033 +/**
20034 + * dpmac_create() - Create the DPMAC object.
20035 + * @mc_io: Pointer to MC portal's I/O object
20036 + * @dprc_token: Parent container token; '0' for default container
20037 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20038 + * @cfg: Configuration structure
20039 + * @obj_id: Returned object id
20040 + *
20041 + * Create the DPMAC object, allocate required resources and
20042 + * perform required initialization.
20043 + *
20044 + * The function accepts an authentication token of a parent
20045 + * container that this object should be assigned to. The token
20046 + * can be '0' so the object will be assigned to the default container.
20047 + * The newly created object can be opened with the returned
20048 + * object id and using the container's associated tokens and MC portals.
20049 + *
20050 + * Return: '0' on Success; Error code otherwise.
20051 + */
20052 +int dpmac_create(struct fsl_mc_io *mc_io,
20053 + u16 dprc_token,
20054 + u32 cmd_flags,
20055 + const struct dpmac_cfg *cfg,
20056 + u32 *obj_id)
20057 +{
20058 + struct dpmac_cmd_create *cmd_params;
20059 + struct mc_command cmd = { 0 };
20060 + int err;
20061 +
20062 + /* prepare command */
20063 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
20064 + cmd_flags,
20065 + dprc_token);
20066 + cmd_params = (struct dpmac_cmd_create *)cmd.params;
20067 + cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
20068 +
20069 + /* send command to mc*/
20070 + err = mc_send_command(mc_io, &cmd);
20071 + if (err)
20072 + return err;
20073 +
20074 + /* retrieve response parameters */
20075 + *obj_id = mc_cmd_read_object_id(&cmd);
20076 +
20077 + return 0;
20078 +}
20079 +
20080 +/**
20081 + * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
20082 + * @mc_io: Pointer to MC portal's I/O object
20083 + * @dprc_token: Parent container token; '0' for default container
20084 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20085 + * @object_id: The object id; it must be a valid id within the container that
20086 + * created this object;
20087 + *
20088 + * The function accepts the authentication token of the parent container that
20089 + * created the object (not the one that currently owns the object). The object
20090 + * is searched within parent using the provided 'object_id'.
20091 + * All tokens to the object must be closed before calling destroy.
20092 + *
20093 + * Return: '0' on Success; error code otherwise.
20094 + */
20095 +int dpmac_destroy(struct fsl_mc_io *mc_io,
20096 + u16 dprc_token,
20097 + u32 cmd_flags,
20098 + u32 object_id)
20099 +{
20100 + struct dpmac_cmd_destroy *cmd_params;
20101 + struct mc_command cmd = { 0 };
20102 +
20103 + /* prepare command */
20104 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
20105 + cmd_flags,
20106 + dprc_token);
20107 + cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
20108 + cmd_params->dpmac_id = cpu_to_le32(object_id);
20109 +
20110 + /* send command to mc*/
20111 + return mc_send_command(mc_io, &cmd);
20112 +}
20113 +
20114 +/**
20115 + * dpmac_set_irq_enable() - Set overall interrupt state.
20116 + * @mc_io: Pointer to MC portal's I/O object
20117 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20118 + * @token: Token of DPMAC object
20119 + * @irq_index: The interrupt index to configure
20120 + * @en: Interrupt state - enable = 1, disable = 0
20121 + *
20122 + * Allows GPP software to control when interrupts are generated.
20123 + * Each interrupt can have up to 32 causes. The enable/disable controls the
20124 + * overall interrupt state. If the interrupt is disabled, no causes will cause
20125 + * an interrupt.
20126 + *
20127 + * Return: '0' on Success; Error code otherwise.
20128 + */
20129 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
20130 + u32 cmd_flags,
20131 + u16 token,
20132 + u8 irq_index,
20133 + u8 en)
20134 +{
20135 + struct dpmac_cmd_set_irq_enable *cmd_params;
20136 + struct mc_command cmd = { 0 };
20137 +
20138 + /* prepare command */
20139 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
20140 + cmd_flags,
20141 + token);
20142 + cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params;
20143 + cmd_params->irq_index = irq_index;
20144 + cmd_params->enable = en;
20145 +
20146 + /* send command to mc*/
20147 + return mc_send_command(mc_io, &cmd);
20148 +}
20149 +
20150 +/**
20151 + * dpmac_get_irq_enable() - Get overall interrupt state
20152 + * @mc_io: Pointer to MC portal's I/O object
20153 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20154 + * @token: Token of DPMAC object
20155 + * @irq_index: The interrupt index to configure
20156 + * @en: Returned interrupt state - enable = 1, disable = 0
20157 + *
20158 + * Return: '0' on Success; Error code otherwise.
20159 + */
20160 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
20161 + u32 cmd_flags,
20162 + u16 token,
20163 + u8 irq_index,
20164 + u8 *en)
20165 +{
20166 + struct dpmac_cmd_get_irq_enable *cmd_params;
20167 + struct dpmac_rsp_get_irq_enable *rsp_params;
20168 + struct mc_command cmd = { 0 };
20169 + int err;
20170 +
20171 + /* prepare command */
20172 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
20173 + cmd_flags,
20174 + token);
20175 + cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params;
20176 + cmd_params->irq_index = irq_index;
20177 +
20178 + /* send command to mc*/
20179 + err = mc_send_command(mc_io, &cmd);
20180 + if (err)
20181 + return err;
20182 +
20183 + /* retrieve response parameters */
20184 + rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params;
20185 + *en = rsp_params->enabled;
20186 +
20187 + return 0;
20188 +}
20189 +
20190 +/**
20191 + * dpmac_set_irq_mask() - Set interrupt mask.
20192 + * @mc_io: Pointer to MC portal's I/O object
20193 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20194 + * @token: Token of DPMAC object
20195 + * @irq_index: The interrupt index to configure
20196 + * @mask: Event mask to trigger interrupt;
20197 + * each bit:
20198 + * 0 = ignore event
20199 + * 1 = consider event for asserting IRQ
20200 + *
20201 + * Every interrupt can have up to 32 causes and the interrupt model supports
20202 + * masking/unmasking each cause independently
20203 + *
20204 + * Return: '0' on Success; Error code otherwise.
20205 + */
20206 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
20207 + u32 cmd_flags,
20208 + u16 token,
20209 + u8 irq_index,
20210 + u32 mask)
20211 +{
20212 + struct dpmac_cmd_set_irq_mask *cmd_params;
20213 + struct mc_command cmd = { 0 };
20214 +
20215 + /* prepare command */
20216 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
20217 + cmd_flags,
20218 + token);
20219 + cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params;
20220 + cmd_params->mask = cpu_to_le32(mask);
20221 + cmd_params->irq_index = irq_index;
20222 +
20223 + /* send command to mc*/
20224 + return mc_send_command(mc_io, &cmd);
20225 +}
20226 +
20227 +/**
20228 + * dpmac_get_irq_mask() - Get interrupt mask.
20229 + * @mc_io: Pointer to MC portal's I/O object
20230 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20231 + * @token: Token of DPMAC object
20232 + * @irq_index: The interrupt index to configure
20233 + * @mask: Returned event mask to trigger interrupt
20234 + *
20235 + * Every interrupt can have up to 32 causes and the interrupt model supports
20236 + * masking/unmasking each cause independently
20237 + *
20238 + * Return: '0' on Success; Error code otherwise.
20239 + */
20240 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
20241 + u32 cmd_flags,
20242 + u16 token,
20243 + u8 irq_index,
20244 + u32 *mask)
20245 +{
20246 + struct dpmac_cmd_get_irq_mask *cmd_params;
20247 + struct dpmac_rsp_get_irq_mask *rsp_params;
20248 + struct mc_command cmd = { 0 };
20249 + int err;
20250 +
20251 + /* prepare command */
20252 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
20253 + cmd_flags,
20254 + token);
20255 + cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params;
20256 + cmd_params->irq_index = irq_index;
20257 +
20258 + /* send command to mc*/
20259 + err = mc_send_command(mc_io, &cmd);
20260 + if (err)
20261 + return err;
20262 +
20263 + /* retrieve response parameters */
20264 + rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params;
20265 + *mask = le32_to_cpu(rsp_params->mask);
20266 +
20267 + return 0;
20268 +}
20269 +
20270 +/**
20271 + * dpmac_get_irq_status() - Get the current status of any pending interrupts.
20272 + *
20273 + * @mc_io: Pointer to MC portal's I/O object
20274 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20275 + * @token: Token of DPMAC object
20276 + * @irq_index: The interrupt index to configure
20277 + * @status: Returned interrupts status - one bit per cause:
20278 + * 0 = no interrupt pending
20279 + * 1 = interrupt pending
20280 + *
20281 + * Return: '0' on Success; Error code otherwise.
20282 + */
20283 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
20284 + u32 cmd_flags,
20285 + u16 token,
20286 + u8 irq_index,
20287 + u32 *status)
20288 +{
20289 + struct dpmac_cmd_get_irq_status *cmd_params;
20290 + struct dpmac_rsp_get_irq_status *rsp_params;
20291 + struct mc_command cmd = { 0 };
20292 + int err;
20293 +
20294 + /* prepare command */
20295 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
20296 + cmd_flags,
20297 + token);
20298 + cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params;
20299 + cmd_params->status = cpu_to_le32(*status);
20300 + cmd_params->irq_index = irq_index;
20301 +
20302 + /* send command to mc*/
20303 + err = mc_send_command(mc_io, &cmd);
20304 + if (err)
20305 + return err;
20306 +
20307 + /* retrieve response parameters */
20308 + rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params;
20309 + *status = le32_to_cpu(rsp_params->status);
20310 +
20311 + return 0;
20312 +}
20313 +
20314 +/**
20315 + * dpmac_clear_irq_status() - Clear a pending interrupt's status
20316 + *
20317 + * @mc_io: Pointer to MC portal's I/O object
20318 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20319 + * @token: Token of DPMAC object
20320 + * @irq_index: The interrupt index to configure
20321 + * @status: Bits to clear (W1C) - one bit per cause:
20322 + * 0 = don't change
20323 + * 1 = clear status bit
20324 + *
20325 + * Return: '0' on Success; Error code otherwise.
20326 + */
20327 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
20328 + u32 cmd_flags,
20329 + u16 token,
20330 + u8 irq_index,
20331 + u32 status)
20332 +{
20333 + struct dpmac_cmd_clear_irq_status *cmd_params;
20334 + struct mc_command cmd = { 0 };
20335 +
20336 + /* prepare command */
20337 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
20338 + cmd_flags,
20339 + token);
20340 + cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params;
20341 + cmd_params->status = cpu_to_le32(status);
20342 + cmd_params->irq_index = irq_index;
20343 +
20344 + /* send command to mc*/
20345 + return mc_send_command(mc_io, &cmd);
20346 +}
20347 +
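
The IRQ helpers above are normally used in two steps: program the cause mask first, then enable the interrupt index as a whole; setup_irqs() in mac.c further down in this patch does exactly that. A minimal sketch, assuming an already-probed fsl_mc_device and omitting the error reporting done by the real driver:

	int err;

	/* only deliver link-configuration requests on this index */
	err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
				 DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ);
	if (err)
		return err;

	/* then turn on the interrupt index as a whole */
	return dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
				    DPMAC_IRQ_INDEX, 1);
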
20348 +/**
20349 + * dpmac_get_attributes() - Retrieve DPMAC attributes.
20350 + *
20351 + * @mc_io: Pointer to MC portal's I/O object
20352 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20353 + * @token: Token of DPMAC object
20354 + * @attr: Returned object's attributes
20355 + *
20356 + * Return: '0' on Success; Error code otherwise.
20357 + */
20358 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
20359 + u32 cmd_flags,
20360 + u16 token,
20361 + struct dpmac_attr *attr)
20362 +{
20363 + struct dpmac_rsp_get_attributes *rsp_params;
20364 + struct mc_command cmd = { 0 };
20365 + int err;
20366 +
20367 + /* prepare command */
20368 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
20369 + cmd_flags,
20370 + token);
20371 +
20372 + /* send command to mc*/
20373 + err = mc_send_command(mc_io, &cmd);
20374 + if (err)
20375 + return err;
20376 +
20377 + /* retrieve response parameters */
20378 + rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
20379 + attr->eth_if = rsp_params->eth_if;
20380 + attr->link_type = rsp_params->link_type;
20381 + attr->id = le16_to_cpu(rsp_params->id);
20382 + attr->max_rate = le32_to_cpu(rsp_params->max_rate);
20383 +
20384 + return 0;
20385 +}
20386 +
20387 +/**
20388 + * dpmac_get_link_cfg() - Get Ethernet link configuration
20389 + * @mc_io: Pointer to opaque I/O object
20390 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20391 + * @token: Token of DPMAC object
20392 + * @cfg: Returned structure with the link configuration
20393 + *
20394 + * Return: '0' on Success; Error code otherwise.
20395 + */
20396 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
20397 + u32 cmd_flags,
20398 + u16 token,
20399 + struct dpmac_link_cfg *cfg)
20400 +{
20401 + struct dpmac_rsp_get_link_cfg *rsp_params;
20402 + struct mc_command cmd = { 0 };
20403 + int err = 0;
20404 +
20405 + /* prepare command */
20406 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
20407 + cmd_flags,
20408 + token);
20409 +
20410 + /* send command to mc*/
20411 + err = mc_send_command(mc_io, &cmd);
20412 + if (err)
20413 + return err;
20414 +
20415 + rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
20416 + cfg->options = le64_to_cpu(rsp_params->options);
20417 + cfg->rate = le32_to_cpu(rsp_params->rate);
20418 +
20419 + return 0;
20420 +}
20421 +
20422 +/**
20423 + * dpmac_set_link_state() - Set the Ethernet link status
20424 + * @mc_io: Pointer to opaque I/O object
20425 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20426 + * @token: Token of DPMAC object
20427 + * @link_state: Link state configuration
20428 + *
20429 + * Return: '0' on Success; Error code otherwise.
20430 + */
20431 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
20432 + u32 cmd_flags,
20433 + u16 token,
20434 + struct dpmac_link_state *link_state)
20435 +{
20436 + struct dpmac_cmd_set_link_state *cmd_params;
20437 + struct mc_command cmd = { 0 };
20438 +
20439 + /* prepare command */
20440 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
20441 + cmd_flags,
20442 + token);
20443 + cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
20444 + cmd_params->options = cpu_to_le64(link_state->options);
20445 + cmd_params->rate = cpu_to_le32(link_state->rate);
20446 + cmd_params->up = dpmac_get_field(link_state->up, STATE);
20447 +
20448 + /* send command to mc*/
20449 + return mc_send_command(mc_io, &cmd);
20450 +}
20451 +
20452 +/**
20453 + * dpmac_get_counter() - Read a specific DPMAC counter
20454 + * @mc_io: Pointer to opaque I/O object
20455 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20456 + * @token: Token of DPMAC object
20457 + * @type: The requested counter
20458 + * @counter: Returned counter value
20459 + *
20460 + * Return: '0' on Success; Error code otherwise.
20461 + */
20462 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
20463 + u32 cmd_flags,
20464 + u16 token,
20465 + enum dpmac_counter type,
20466 + u64 *counter)
20467 +{
20468 + struct dpmac_cmd_get_counter *dpmac_cmd;
20469 + struct dpmac_rsp_get_counter *dpmac_rsp;
20470 + struct mc_command cmd = { 0 };
20471 + int err = 0;
20472 +
20473 + /* prepare command */
20474 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
20475 + cmd_flags,
20476 + token);
20477 + dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
20478 + dpmac_cmd->type = type;
20479 +
20480 + /* send command to mc*/
20481 + err = mc_send_command(mc_io, &cmd);
20482 + if (err)
20483 + return err;
20484 +
20485 + dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
20486 + *counter = le64_to_cpu(dpmac_rsp->counter);
20487 +
20488 + return 0;
20489 +}
20490 +
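
dpmac_get_counter() hands the value back through the 'counter' out-parameter and returns only an error code; the ethtool statistics hooks in mac.c below call it once per counter id. A minimal sketch, assuming an open DPMAC token:

	u64 rx_bytes = 0;
	int err;

	err = dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_BYTE, &rx_bytes);
	if (err)
		pr_err("dpmac_get_counter err %d\n", err);
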
20491 +/* untested */
20492 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
20493 + u32 cmd_flags,
20494 + u16 token,
20495 + const u8 addr[6])
20496 +{
20497 + struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
20498 + struct mc_command cmd = { 0 };
20499 +
20500 + /* prepare command */
20501 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
20502 + cmd_flags,
20503 + token);
20504 + dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params;
20505 + dpmac_cmd->addr[0] = addr[5];
20506 + dpmac_cmd->addr[1] = addr[4];
20507 + dpmac_cmd->addr[2] = addr[3];
20508 + dpmac_cmd->addr[3] = addr[2];
20509 + dpmac_cmd->addr[4] = addr[1];
20510 + dpmac_cmd->addr[5] = addr[0];
20511 +
20512 + /* send command to mc*/
20513 + return mc_send_command(mc_io, &cmd);
20514 +}
20515 +
20516 +/**
20517 + * dpmac_get_api_version() - Get Data Path MAC version
20518 + * @mc_io: Pointer to MC portal's I/O object
20519 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20520 + * @major_ver: Major version of data path mac API
20521 + * @minor_ver: Minor version of data path mac API
20522 + *
20523 + * Return: '0' on Success; Error code otherwise.
20524 + */
20525 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
20526 + u32 cmd_flags,
20527 + u16 *major_ver,
20528 + u16 *minor_ver)
20529 +{
20530 + struct dpmac_rsp_get_api_version *rsp_params;
20531 + struct mc_command cmd = { 0 };
20532 + int err;
20533 +
20534 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
20535 + cmd_flags,
20536 + 0);
20537 +
20538 + err = mc_send_command(mc_io, &cmd);
20539 + if (err)
20540 + return err;
20541 +
20542 + rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
20543 + *major_ver = le16_to_cpu(rsp_params->major);
20544 + *minor_ver = le16_to_cpu(rsp_params->minor);
20545 +
20546 + return 0;
20547 +}
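
All of the commands in this file operate on a token obtained from dpmac_open(); dpaa2_mac_probe() later in this patch shows the full open/get_attributes/close flow. A condensed sketch using a hypothetical helper, with error paths trimmed:

	static int dpmac_query_attr(struct fsl_mc_io *mc_io, int dpmac_id,
				    struct dpmac_attr *attr)
	{
		u16 token;
		int err;

		err = dpmac_open(mc_io, 0, dpmac_id, &token);
		if (err)
			return err;

		err = dpmac_get_attributes(mc_io, 0, token, attr);

		/* always release the control session, even on error */
		dpmac_close(mc_io, 0, token);
		return err;
	}
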
20548 --- /dev/null
20549 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
20550 @@ -0,0 +1,342 @@
20551 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
20552 + *
20553 + * Redistribution and use in source and binary forms, with or without
20554 + * modification, are permitted provided that the following conditions are met:
20555 + * * Redistributions of source code must retain the above copyright
20556 + * notice, this list of conditions and the following disclaimer.
20557 + * * Redistributions in binary form must reproduce the above copyright
20558 + * notice, this list of conditions and the following disclaimer in the
20559 + * documentation and/or other materials provided with the distribution.
20560 + * * Neither the name of the above-listed copyright holders nor the
20561 + * names of any contributors may be used to endorse or promote products
20562 + * derived from this software without specific prior written permission.
20563 + *
20564 + *
20565 + * ALTERNATIVELY, this software may be distributed under the terms of the
20566 + * GNU General Public License ("GPL") as published by the Free Software
20567 + * Foundation, either version 2 of that License or (at your option) any
20568 + * later version.
20569 + *
20570 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20571 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20572 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20573 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
20574 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20575 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20576 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20577 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20578 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20579 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
20580 + * POSSIBILITY OF SUCH DAMAGE.
20581 + */
20582 +#ifndef __FSL_DPMAC_H
20583 +#define __FSL_DPMAC_H
20584 +
20585 +/* Data Path MAC API
20586 + * Contains initialization APIs and runtime control APIs for DPMAC
20587 + */
20588 +
20589 +struct fsl_mc_io;
20590 +
20591 +int dpmac_open(struct fsl_mc_io *mc_io,
20592 + u32 cmd_flags,
20593 + int dpmac_id,
20594 + u16 *token);
20595 +
20596 +int dpmac_close(struct fsl_mc_io *mc_io,
20597 + u32 cmd_flags,
20598 + u16 token);
20599 +
20600 +/**
20601 + * enum dpmac_link_type - DPMAC link type
20602 + * @DPMAC_LINK_TYPE_NONE: No link
20603 + * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
20604 + * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
20605 + * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
20606 + */
20607 +enum dpmac_link_type {
20608 + DPMAC_LINK_TYPE_NONE,
20609 + DPMAC_LINK_TYPE_FIXED,
20610 + DPMAC_LINK_TYPE_PHY,
20611 + DPMAC_LINK_TYPE_BACKPLANE
20612 +};
20613 +
20614 +/**
20615 + * enum dpmac_eth_if - DPMAC Ethernet interface
20616 + * @DPMAC_ETH_IF_MII: MII interface
20617 + * @DPMAC_ETH_IF_RMII: RMII interface
20618 + * @DPMAC_ETH_IF_SMII: SMII interface
20619 + * @DPMAC_ETH_IF_GMII: GMII interface
20620 + * @DPMAC_ETH_IF_RGMII: RGMII interface
20621 + * @DPMAC_ETH_IF_SGMII: SGMII interface
20622 + * @DPMAC_ETH_IF_QSGMII: QSGMII interface
20623 + * @DPMAC_ETH_IF_XAUI: XAUI interface
20624 + * @DPMAC_ETH_IF_XFI: XFI interface
20625 + */
20626 +enum dpmac_eth_if {
20627 + DPMAC_ETH_IF_MII,
20628 + DPMAC_ETH_IF_RMII,
20629 + DPMAC_ETH_IF_SMII,
20630 + DPMAC_ETH_IF_GMII,
20631 + DPMAC_ETH_IF_RGMII,
20632 + DPMAC_ETH_IF_SGMII,
20633 + DPMAC_ETH_IF_QSGMII,
20634 + DPMAC_ETH_IF_XAUI,
20635 + DPMAC_ETH_IF_XFI
20636 +};
20637 +
20638 +/**
20639 + * struct dpmac_cfg - Structure representing DPMAC configuration
20640 + * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP,
20641 + * the MAC IDs are continuous.
20642 + * For example: 2 WRIOPs, 16 MACs in each:
20643 + * MAC IDs for the 1st WRIOP: 1-16,
20644 + * MAC IDs for the 2nd WRIOP: 17-32.
20645 + */
20646 +struct dpmac_cfg {
20647 + u16 mac_id;
20648 +};
20649 +
20650 +int dpmac_create(struct fsl_mc_io *mc_io,
20651 + u16 dprc_token,
20652 + u32 cmd_flags,
20653 + const struct dpmac_cfg *cfg,
20654 + u32 *obj_id);
20655 +
20656 +int dpmac_destroy(struct fsl_mc_io *mc_io,
20657 + u16 dprc_token,
20658 + u32 cmd_flags,
20659 + u32 object_id);
20660 +
20661 +/**
20662 + * DPMAC IRQ Index and Events
20663 + */
20664 +
20665 +/**
20666 + * IRQ index
20667 + */
20668 +#define DPMAC_IRQ_INDEX 0
20669 +/**
20670 + * IRQ event - indicates a change in link state
20671 + */
20672 +#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
20673 +/**
20674 + * IRQ event - Indicates that the link state changed
20675 + */
20676 +#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
20677 +
20678 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
20679 + u32 cmd_flags,
20680 + u16 token,
20681 + u8 irq_index,
20682 + u8 en);
20683 +
20684 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
20685 + u32 cmd_flags,
20686 + u16 token,
20687 + u8 irq_index,
20688 + u8 *en);
20689 +
20690 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
20691 + u32 cmd_flags,
20692 + u16 token,
20693 + u8 irq_index,
20694 + u32 mask);
20695 +
20696 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
20697 + u32 cmd_flags,
20698 + u16 token,
20699 + u8 irq_index,
20700 + u32 *mask);
20701 +
20702 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
20703 + u32 cmd_flags,
20704 + u16 token,
20705 + u8 irq_index,
20706 + u32 *status);
20707 +
20708 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
20709 + u32 cmd_flags,
20710 + u16 token,
20711 + u8 irq_index,
20712 + u32 status);
20713 +
20714 +/**
20715 + * struct dpmac_attr - Structure representing DPMAC attributes
20716 + * @id: DPMAC object ID
20717 + * @max_rate: Maximum supported rate - in Mbps
20718 + * @eth_if: Ethernet interface
20719 + * @link_type: link type
20720 + */
20721 +struct dpmac_attr {
20722 + u16 id;
20723 + u32 max_rate;
20724 + enum dpmac_eth_if eth_if;
20725 + enum dpmac_link_type link_type;
20726 +};
20727 +
20728 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
20729 + u32 cmd_flags,
20730 + u16 token,
20731 + struct dpmac_attr *attr);
20732 +
20733 +/**
20734 + * DPMAC link configuration/state options
20735 + */
20736 +
20737 +/**
20738 + * Enable auto-negotiation
20739 + */
20740 +#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
20741 +/**
20742 + * Enable half-duplex mode
20743 + */
20744 +#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
20745 +/**
20746 + * Enable pause frames
20747 + */
20748 +#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
20749 +/**
20750 + * Enable asymmetric pause frames
20751 + */
20752 +#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
20753 +
20754 +/**
20755 + * struct dpmac_link_cfg - Structure representing DPMAC link configuration
20756 + * @rate: Link's rate - in Mbps
20757 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
20758 + */
20759 +struct dpmac_link_cfg {
20760 + u32 rate;
20761 + u64 options;
20762 +};
20763 +
20764 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
20765 + u32 cmd_flags,
20766 + u16 token,
20767 + struct dpmac_link_cfg *cfg);
20768 +
20769 +/**
20770 + * struct dpmac_link_state - DPMAC link configuration request
20771 + * @rate: Rate in Mbps
20772 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
20773 + * @up: Link state
20774 + */
20775 +struct dpmac_link_state {
20776 + u32 rate;
20777 + u64 options;
20778 + int up;
20779 +};
20780 +
20781 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
20782 + u32 cmd_flags,
20783 + u16 token,
20784 + struct dpmac_link_state *link_state);
20785 +
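
struct dpmac_link_state is what the proxy driver reports back to the MC whenever the PHY signals a change; dpaa2_mac_link_changed() in mac.c below builds it from the phydev fields. A minimal sketch, assuming a valid 'phydev' and an open DPMAC token:

	struct dpmac_link_state state = { 0 };
	int err;

	state.up = !!phydev->link;
	state.rate = phydev->speed;	/* in Mbps */
	if (!phydev->duplex)
		state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
	if (phydev->autoneg)
		state.options |= DPMAC_LINK_OPT_AUTONEG;

	err = dpmac_set_link_state(mc_io, 0, token, &state);
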
20786 +/**
20787 + * enum dpmac_counter - DPMAC counter types
20788 + * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad.
20789 + * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad.
20790 + * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad.
20791 + * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
20792 + * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
20793 + * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
20794 + * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
20795 + * (up to max frame length specified),
20796 + * good or bad.
20797 + * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes with a
20798 + *			wrong CRC.
20799 + * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
20800 + * specified, with a bad frame check sequence.
20801 + * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
20802 + * Occurs when a receive FIFO overflows.
20803 + * Includes also frames truncated as a result of
20804 + * the receive FIFO overflow.
20805 + * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
20806 + *			(optionally used for wrong SFD).
20807 + * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than 64
20808 + *			bytes long, with a good CRC.
20809 + * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
20810 + * specified, with a good frame check sequence.
20811 + * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
20812 + * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
20813 + * (regular and PFC).
20814 + * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
20815 + * frames and valid pause frames.
20816 + * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
20817 + * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
20818 + * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
20819 + * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
20820 + * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
20821 + * (except for undersized/fragment frame).
20822 + * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
20823 + * frames and valid pause frames transmitted.
20824 + * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
20825 + * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
20826 + * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
20827 + * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
20828 + * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
20829 + * pause frames.
20830 + * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
20831 + * pause frames.
20832 + */
20833 +enum dpmac_counter {
20834 + DPMAC_CNT_ING_FRAME_64,
20835 + DPMAC_CNT_ING_FRAME_127,
20836 + DPMAC_CNT_ING_FRAME_255,
20837 + DPMAC_CNT_ING_FRAME_511,
20838 + DPMAC_CNT_ING_FRAME_1023,
20839 + DPMAC_CNT_ING_FRAME_1518,
20840 + DPMAC_CNT_ING_FRAME_1519_MAX,
20841 + DPMAC_CNT_ING_FRAG,
20842 + DPMAC_CNT_ING_JABBER,
20843 + DPMAC_CNT_ING_FRAME_DISCARD,
20844 + DPMAC_CNT_ING_ALIGN_ERR,
20845 + DPMAC_CNT_EGR_UNDERSIZED,
20846 + DPMAC_CNT_ING_OVERSIZED,
20847 + DPMAC_CNT_ING_VALID_PAUSE_FRAME,
20848 + DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
20849 + DPMAC_CNT_ING_BYTE,
20850 + DPMAC_CNT_ING_MCAST_FRAME,
20851 + DPMAC_CNT_ING_BCAST_FRAME,
20852 + DPMAC_CNT_ING_ALL_FRAME,
20853 + DPMAC_CNT_ING_UCAST_FRAME,
20854 + DPMAC_CNT_ING_ERR_FRAME,
20855 + DPMAC_CNT_EGR_BYTE,
20856 + DPMAC_CNT_EGR_MCAST_FRAME,
20857 + DPMAC_CNT_EGR_BCAST_FRAME,
20858 + DPMAC_CNT_EGR_UCAST_FRAME,
20859 + DPMAC_CNT_EGR_ERR_FRAME,
20860 + DPMAC_CNT_ING_GOOD_FRAME,
20861 + DPMAC_CNT_ENG_GOOD_FRAME
20862 +};
20863 +
20864 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
20865 + u32 cmd_flags,
20866 + u16 token,
20867 + enum dpmac_counter type,
20868 + u64 *counter);
20869 +
20870 +/**
20871 + * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical
20872 + *				port. This is not used for filtering; the MAC is
20873 + *				always in promiscuous mode. The address is passed
20874 + *				to DPNIs through the DPNI API for application use.
20875 + * @mc_io: Pointer to opaque I/O object
20876 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20877 + * @token: Token of DPMAC object
20878 + * @addr: MAC address to set
20879 + *
20880 + * Return: '0' on Success; Error code otherwise.
20881 + */
20882 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
20883 + u32 cmd_flags,
20884 + u16 token,
20885 + const u8 addr[6]);
20886 +
20887 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
20888 + u32 cmd_flags,
20889 + u16 *major_ver,
20890 + u16 *minor_ver);
20891 +
20892 +#endif /* __FSL_DPMAC_H */
20893 --- /dev/null
20894 +++ b/drivers/staging/fsl-dpaa2/mac/mac.c
20895 @@ -0,0 +1,666 @@
20896 +/* Copyright 2015 Freescale Semiconductor Inc.
20897 + *
20898 + * Redistribution and use in source and binary forms, with or without
20899 + * modification, are permitted provided that the following conditions are met:
20900 + * * Redistributions of source code must retain the above copyright
20901 + * notice, this list of conditions and the following disclaimer.
20902 + * * Redistributions in binary form must reproduce the above copyright
20903 + * notice, this list of conditions and the following disclaimer in the
20904 + * documentation and/or other materials provided with the distribution.
20905 + * * Neither the name of Freescale Semiconductor nor the
20906 + * names of its contributors may be used to endorse or promote products
20907 + * derived from this software without specific prior written permission.
20908 + *
20909 + *
20910 + * ALTERNATIVELY, this software may be distributed under the terms of the
20911 + * GNU General Public License ("GPL") as published by the Free Software
20912 + * Foundation, either version 2 of that License or (at your option) any
20913 + * later version.
20914 + *
20915 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20916 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20917 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20918 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
20919 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20920 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20921 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
20922 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
20923 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
20924 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20925 + */
20926 +
20927 +#include <linux/module.h>
20928 +
20929 +#include <linux/netdevice.h>
20930 +#include <linux/etherdevice.h>
20931 +#include <linux/msi.h>
20932 +#include <linux/rtnetlink.h>
20933 +#include <linux/if_vlan.h>
20934 +
20935 +#include <uapi/linux/if_bridge.h>
20936 +#include <net/netlink.h>
20937 +
20938 +#include <linux/of.h>
20939 +#include <linux/of_mdio.h>
20940 +#include <linux/of_net.h>
20941 +#include <linux/phy.h>
20942 +#include <linux/phy_fixed.h>
20943 +
20944 +#include "../../fsl-mc/include/mc.h"
20945 +#include "../../fsl-mc/include/mc-sys.h"
20946 +
20947 +#include "dpmac.h"
20948 +#include "dpmac-cmd.h"
20949 +
20950 +struct dpaa2_mac_priv {
20951 + struct net_device *netdev;
20952 + struct fsl_mc_device *mc_dev;
20953 + struct dpmac_attr attr;
20954 + struct dpmac_link_state old_state;
20955 +};
20956 +
20957 +/* TODO: fix the 10G modes, mapping can't be right:
20958 + * XGMII is parallel
20959 + * XAUI is serial, using 8b/10b encoding
20960 + * XFI is also serial but using 64b/66b encoding
20961 + * they can't all map to XGMII...
20962 + *
20963 + * This must be kept in sync with enum dpmac_eth_if.
20964 + */
20965 +static phy_interface_t dpaa2_mac_iface_mode[] = {
20966 + PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */
20967 + PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */
20968 + PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */
20969 + PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */
20970 + PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */
20971 + PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */
20972 + PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */
20973 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */
20974 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */
20975 +};
20976 +
20977 +static void dpaa2_mac_link_changed(struct net_device *netdev)
20978 +{
20979 + struct phy_device *phydev;
20980 + struct dpmac_link_state state = { 0 };
20981 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
20982 + int err;
20983 +
20984 + /* the PHY just notified us of link state change */
20985 + phydev = netdev->phydev;
20986 +
20987 + state.up = !!phydev->link;
20988 + if (phydev->link) {
20989 + state.rate = phydev->speed;
20990 +
20991 + if (!phydev->duplex)
20992 + state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
20993 + if (phydev->autoneg)
20994 + state.options |= DPMAC_LINK_OPT_AUTONEG;
20995 +
20996 + netif_carrier_on(netdev);
20997 + } else {
20998 + netif_carrier_off(netdev);
20999 + }
21000 +
21001 + if (priv->old_state.up != state.up ||
21002 + priv->old_state.rate != state.rate ||
21003 + priv->old_state.options != state.options) {
21004 + priv->old_state = state;
21005 + phy_print_status(phydev);
21006 + }
21007 +
21008 +	/* We must always report the state to the MC, because we don't know
21009 +	 * when or whether a potential DPNI may read the link state.
21010 +	 */
21011 + err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
21012 + priv->mc_dev->mc_handle, &state);
21013 + if (unlikely(err))
21014 + dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
21015 +}
21016 +
21017 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21018 +static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
21019 + struct net_device *dev)
21020 +{
21021 + /* we don't support I/O for now, drop the frame */
21022 + dev_kfree_skb_any(skb);
21023 + return NETDEV_TX_OK;
21024 +}
21025 +
21026 +static int dpaa2_mac_open(struct net_device *netdev)
21027 +{
21028 + /* start PHY state machine */
21029 + phy_start(netdev->phydev);
21030 +
21031 + return 0;
21032 +}
21033 +
21034 +static int dpaa2_mac_stop(struct net_device *netdev)
21035 +{
21036 + if (!netdev->phydev)
21037 + goto done;
21038 +
21039 + /* stop PHY state machine */
21040 + phy_stop(netdev->phydev);
21041 +
21042 + /* signal link down to firmware */
21043 + netdev->phydev->link = 0;
21044 + dpaa2_mac_link_changed(netdev);
21045 +
21046 +done:
21047 + return 0;
21048 +}
21049 +
21050 +static int dpaa2_mac_get_settings(struct net_device *netdev,
21051 + struct ethtool_cmd *cmd)
21052 +{
21053 + return phy_ethtool_gset(netdev->phydev, cmd);
21054 +}
21055 +
21056 +static int dpaa2_mac_set_settings(struct net_device *netdev,
21057 + struct ethtool_cmd *cmd)
21058 +{
21059 + return phy_ethtool_sset(netdev->phydev, cmd);
21060 +}
21061 +
21062 +static void dpaa2_mac_get_stats(struct net_device *netdev,
21063 + struct rtnl_link_stats64 *storage)
21064 +{
21065 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21066 + u64 tmp;
21067 + int err;
21068 +
21069 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21070 + DPMAC_CNT_EGR_MCAST_FRAME,
21071 + &storage->tx_packets);
21072 + if (err)
21073 + goto error;
21074 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21075 + DPMAC_CNT_EGR_BCAST_FRAME, &tmp);
21076 + if (err)
21077 + goto error;
21078 + storage->tx_packets += tmp;
21079 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21080 + DPMAC_CNT_EGR_UCAST_FRAME, &tmp);
21081 + if (err)
21082 + goto error;
21083 + storage->tx_packets += tmp;
21084 +
21085 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21086 + DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped);
21087 + if (err)
21088 + goto error;
21089 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21090 + DPMAC_CNT_EGR_BYTE, &storage->tx_bytes);
21091 + if (err)
21092 + goto error;
21093 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21094 + DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors);
21095 + if (err)
21096 + goto error;
21097 +
21098 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21099 + DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets);
21100 + if (err)
21101 + goto error;
21102 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21103 + DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast);
21104 + if (err)
21105 + goto error;
21106 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21107 + DPMAC_CNT_ING_FRAME_DISCARD,
21108 + &storage->rx_dropped);
21109 + if (err)
21110 + goto error;
21111 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21112 + DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors);
21113 + if (err)
21114 + goto error;
21115 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21116 + DPMAC_CNT_ING_OVERSIZED, &tmp);
21117 + if (err)
21118 + goto error;
21119 + storage->rx_errors += tmp;
21120 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21121 + DPMAC_CNT_ING_BYTE, &storage->rx_bytes);
21122 + if (err)
21123 + goto error;
21124 +
21125 + return;
21126 +error:
21127 + netdev_err(netdev, "dpmac_get_counter err %d\n", err);
21128 +}
21129 +
21130 +static struct {
21131 + enum dpmac_counter id;
21132 + char name[ETH_GSTRING_LEN];
21133 +} dpaa2_mac_counters[] = {
21134 + {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"},
21135 + {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"},
21136 + {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"},
21137 + {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"},
21138 + {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"},
21139 + {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"},
21140 + {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"},
21141 + {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"},
21142 + {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"},
21143 + {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"},
21144 + {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"},
21145 + {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"},
21146 + {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"},
21147 + {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"},
21148 + {DPMAC_CNT_ING_FRAG, "rx frags"},
21149 + {DPMAC_CNT_ING_JABBER, "rx jabber"},
21150 + {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"},
21151 + {DPMAC_CNT_ING_OVERSIZED, "rx oversized"},
21152 + {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"},
21153 + {DPMAC_CNT_ING_BYTE, "rx bytes"},
21154 + {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"},
21155 + {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"},
21156 + {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"},
21157 + {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"},
21158 + {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"},
21159 + {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"},
21160 + {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"},
21161 + {DPMAC_CNT_EGR_BYTE, "tx bytes"},
21162 +
21163 +};
21164 +
21165 +static void dpaa2_mac_get_strings(struct net_device *netdev,
21166 + u32 stringset, u8 *data)
21167 +{
21168 + int i;
21169 +
21170 + switch (stringset) {
21171 + case ETH_SS_STATS:
21172 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++)
21173 + memcpy(data + i * ETH_GSTRING_LEN,
21174 + dpaa2_mac_counters[i].name,
21175 + ETH_GSTRING_LEN);
21176 + break;
21177 + }
21178 +}
21179 +
21180 +static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev,
21181 + struct ethtool_stats *stats,
21182 + u64 *data)
21183 +{
21184 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21185 + int i;
21186 + int err;
21187 +
21188 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) {
21189 + err = dpmac_get_counter(priv->mc_dev->mc_io,
21190 + 0,
21191 + priv->mc_dev->mc_handle,
21192 + dpaa2_mac_counters[i].id, &data[i]);
21193 + if (err)
21194 + netdev_err(netdev, "dpmac_get_counter[%s] err %d\n",
21195 + dpaa2_mac_counters[i].name, err);
21196 + }
21197 +}
21198 +
21199 +static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset)
21200 +{
21201 + switch (sset) {
21202 + case ETH_SS_STATS:
21203 + return ARRAY_SIZE(dpaa2_mac_counters);
21204 + default:
21205 + return -EOPNOTSUPP;
21206 + }
21207 +}
21208 +
21209 +static const struct net_device_ops dpaa2_mac_ndo_ops = {
21210 + .ndo_start_xmit = &dpaa2_mac_drop_frame,
21211 + .ndo_open = &dpaa2_mac_open,
21212 + .ndo_stop = &dpaa2_mac_stop,
21213 + .ndo_get_stats64 = &dpaa2_mac_get_stats,
21214 +};
21215 +
21216 +static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
21217 + .get_settings = &dpaa2_mac_get_settings,
21218 + .set_settings = &dpaa2_mac_set_settings,
21219 + .get_strings = &dpaa2_mac_get_strings,
21220 + .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
21221 + .get_sset_count = &dpaa2_mac_get_sset_count,
21222 +};
21223 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21224 +
21225 +static void configure_link(struct dpaa2_mac_priv *priv,
21226 + struct dpmac_link_cfg *cfg)
21227 +{
21228 + struct phy_device *phydev = priv->netdev->phydev;
21229 +
21230 + if (unlikely(!phydev))
21231 + return;
21232 +
21233 + phydev->speed = cfg->rate;
21234 + phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
21235 +
21236 + if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
21237 + phydev->autoneg = 1;
21238 + phydev->advertising |= ADVERTISED_Autoneg;
21239 + } else {
21240 + phydev->autoneg = 0;
21241 + phydev->advertising &= ~ADVERTISED_Autoneg;
21242 + }
21243 +
21244 + phy_start_aneg(phydev);
21245 +}
21246 +
21247 +static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
21248 +{
21249 + struct device *dev = (struct device *)arg;
21250 + struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
21251 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
21252 + struct dpmac_link_cfg link_cfg;
21253 + u32 status;
21254 + int err;
21255 +
21256 + err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21257 + DPMAC_IRQ_INDEX, &status);
21258 + if (unlikely(err || !status))
21259 + return IRQ_NONE;
21260 +
21261 + /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
21262 + if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
21263 + err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
21264 + &link_cfg);
21265 + if (unlikely(err))
21266 + goto out;
21267 +
21268 + configure_link(priv, &link_cfg);
21269 + }
21270 +
21271 +out:
21272 + dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21273 + DPMAC_IRQ_INDEX, status);
21274 +
21275 + return IRQ_HANDLED;
21276 +}
21277 +
21278 +static int setup_irqs(struct fsl_mc_device *mc_dev)
21279 +{
21280 + int err = 0;
21281 + struct fsl_mc_device_irq *irq;
21282 +
21283 + err = fsl_mc_allocate_irqs(mc_dev);
21284 + if (err) {
21285 + dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
21286 + return err;
21287 + }
21288 +
21289 + irq = mc_dev->irqs[0];
21290 + err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq,
21291 + NULL, &dpaa2_mac_irq_handler,
21292 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
21293 + dev_name(&mc_dev->dev), &mc_dev->dev);
21294 + if (err) {
21295 + dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
21296 + err);
21297 + goto free_irq;
21298 + }
21299 +
21300 + err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
21301 + DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ);
21302 + if (err) {
21303 + dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
21304 + goto free_irq;
21305 + }
21306 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21307 + DPMAC_IRQ_INDEX, 1);
21308 + if (err) {
21309 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21310 + goto free_irq;
21311 + }
21312 +
21313 + return 0;
21314 +
21315 +free_irq:
21316 + fsl_mc_free_irqs(mc_dev);
21317 +
21318 + return err;
21319 +}
21320 +
21321 +static void teardown_irqs(struct fsl_mc_device *mc_dev)
21322 +{
21323 + int err;
21324 +
21325 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21326 + DPMAC_IRQ_INDEX, 0);
21327 + if (err)
21328 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21329 +
21330 + fsl_mc_free_irqs(mc_dev);
21331 +}
21332 +
21333 +static struct device_node *find_dpmac_node(struct device *dev, u16 dpmac_id)
21334 +{
21335 + struct device_node *dpmacs, *dpmac = NULL;
21336 + struct device_node *mc_node = dev->of_node;
21337 + u32 id;
21338 + int err;
21339 +
21340 + dpmacs = of_find_node_by_name(mc_node, "dpmacs");
21341 + if (!dpmacs) {
21342 + dev_err(dev, "No dpmacs subnode in device-tree\n");
21343 + return NULL;
21344 + }
21345 +
21346 + while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
21347 + err = of_property_read_u32(dpmac, "reg", &id);
21348 + if (err)
21349 + continue;
21350 + if (id == dpmac_id)
21351 + return dpmac;
21352 + }
21353 +
21354 + return NULL;
21355 +}
21356 +
21357 +static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
21358 +{
21359 + struct device *dev;
21360 + struct dpaa2_mac_priv *priv = NULL;
21361 + struct device_node *phy_node, *dpmac_node;
21362 + struct net_device *netdev;
21363 + phy_interface_t if_mode;
21364 + int err = 0;
21365 +
21366 + dev = &mc_dev->dev;
21367 +
21368 + /* prepare a net_dev structure to make the phy lib API happy */
21369 + netdev = alloc_etherdev(sizeof(*priv));
21370 + if (!netdev) {
21371 + dev_err(dev, "alloc_etherdev error\n");
21372 + err = -ENOMEM;
21373 + goto err_exit;
21374 + }
21375 + priv = netdev_priv(netdev);
21376 + priv->mc_dev = mc_dev;
21377 + priv->netdev = netdev;
21378 +
21379 + SET_NETDEV_DEV(netdev, dev);
21380 +
21381 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21382 + snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
21383 +#endif
21384 +
21385 + dev_set_drvdata(dev, priv);
21386 +
21387 + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
21388 + if (err || !mc_dev->mc_io) {
21389 + dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
21390 + err = -ENODEV;
21391 + goto err_free_netdev;
21392 + }
21393 +
21394 + err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
21395 + &mc_dev->mc_handle);
21396 + if (err || !mc_dev->mc_handle) {
21397 + dev_err(dev, "dpmac_open error: %d\n", err);
21398 + err = -ENODEV;
21399 + goto err_free_mcp;
21400 + }
21401 +
21402 + err = dpmac_get_attributes(mc_dev->mc_io, 0,
21403 + mc_dev->mc_handle, &priv->attr);
21404 + if (err) {
21405 + dev_err(dev, "dpmac_get_attributes err %d\n", err);
21406 + err = -EINVAL;
21407 + goto err_close;
21408 + }
21409 +
21410 + /* Look up the DPMAC node in the device-tree. */
21411 + dpmac_node = find_dpmac_node(dev, priv->attr.id);
21412 + if (!dpmac_node) {
21413 + dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
21414 + err = -ENODEV;
21415 + goto err_close;
21416 + }
21417 +
21418 + err = setup_irqs(mc_dev);
21419 + if (err) {
21420 + err = -EFAULT;
21421 + goto err_close;
21422 + }
21423 +
21424 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21425 + /* OPTIONAL, register netdev just to make it visible to the user */
21426 + netdev->netdev_ops = &dpaa2_mac_ndo_ops;
21427 + netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
21428 +
21429 + /* phy starts up enabled so netdev should be up too */
21430 + netdev->flags |= IFF_UP;
21431 +
21432 + err = register_netdev(priv->netdev);
21433 + if (err < 0) {
21434 + dev_err(dev, "register_netdev error %d\n", err);
21435 + err = -ENODEV;
21436 + goto err_free_irq;
21437 + }
21438 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21439 +
21440 + /* probe the PHY as a fixed-link if the link type declared in DPC
21441 + * explicitly mandates this
21442 + */
21443 +
21444 + phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
21445 + if (!phy_node) {
21446 + goto probe_fixed_link;
21447 + }
21448 +
21449 + if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
21450 + if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
21451 + dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
21452 + phy_modes(if_mode), priv->attr.eth_if);
21453 + } else {
21454 + dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n",
21455 + priv->attr.eth_if);
21456 + goto probe_fixed_link;
21457 + }
21458 +
21459 + /* try to connect to the PHY */
21460 + netdev->phydev = of_phy_connect(netdev, phy_node,
21461 + &dpaa2_mac_link_changed, 0, if_mode);
21462 + if (!netdev->phydev) {
21463 + /* No need for dev_err(); the kernel's loud enough as it is. */
21464 + dev_dbg(dev, "Can't of_phy_connect() now.\n");
21465 + /* We might be waiting for the MDIO MUX to probe, so defer
21466 + * our own probing.
21467 + */
21468 + err = -EPROBE_DEFER;
21469 + goto err_defer;
21470 + }
21471 + dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode));
21472 +
21473 +probe_fixed_link:
21474 + if (!netdev->phydev) {
21475 + struct fixed_phy_status status = {
21476 + .link = 1,
21477 + /* fixed-phys don't support 10Gbps speed for now */
21478 + .speed = 1000,
21479 + .duplex = 1,
21480 + };
21481 +
21482 + /* try to register a fixed link phy */
21483 + netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1,
21484 + NULL);
21485 + if (!netdev->phydev || IS_ERR(netdev->phydev)) {
21486 + dev_err(dev, "error trying to register fixed PHY\n");
21487 + /* So we don't crash unregister_netdev() later on */
21488 + netdev->phydev = NULL;
21489 + err = -EFAULT;
21490 + goto err_no_phy;
21491 + }
21492 + dev_info(dev, "Registered fixed PHY.\n");
21493 + }
21494 +
21495 + /* start PHY state machine */
21496 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21497 + dpaa2_mac_open(netdev);
21498 +#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21499 + phy_start(netdev->phydev);
21500 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21501 + return 0;
21502 +
21503 +err_defer:
21504 +err_no_phy:
21505 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21506 + unregister_netdev(netdev);
21507 +err_free_irq:
21508 +#endif
21509 + teardown_irqs(mc_dev);
21510 +err_close:
21511 + dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
21512 +err_free_mcp:
21513 + fsl_mc_portal_free(mc_dev->mc_io);
21514 +err_free_netdev:
21515 + free_netdev(netdev);
21516 +err_exit:
21517 + return err;
21518 +}
21519 +
21520 +static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev)
21521 +{
21522 + struct device *dev = &mc_dev->dev;
21523 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
21524 +
21525 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21526 + unregister_netdev(priv->netdev);
21527 +#endif
21528 + teardown_irqs(priv->mc_dev);
21529 + dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
21530 + fsl_mc_portal_free(priv->mc_dev->mc_io);
21531 + free_netdev(priv->netdev);
21532 +
21533 + dev_set_drvdata(dev, NULL);
21534 + kfree(priv);
21535 +
21536 + return 0;
21537 +}
21538 +
21539 +static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = {
21540 + {
21541 + .vendor = FSL_MC_VENDOR_FREESCALE,
21542 + .obj_type = "dpmac",
21543 + },
21544 + { .vendor = 0x0 }
21545 +};
21546 +MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table);
21547 +
21548 +static struct fsl_mc_driver dpaa2_mac_drv = {
21549 + .driver = {
21550 + .name = KBUILD_MODNAME,
21551 + .owner = THIS_MODULE,
21552 + },
21553 + .probe = dpaa2_mac_probe,
21554 + .remove = dpaa2_mac_remove,
21555 + .match_id_table = dpaa2_mac_match_id_table,
21556 +};
21557 +
21558 +module_fsl_mc_driver(dpaa2_mac_drv);
21559 +
21560 +MODULE_LICENSE("GPL");
21561 +MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver");
21562 --- /dev/null
21563 +++ b/drivers/staging/fsl-dpaa2/rtc/Makefile
21564 @@ -0,0 +1,10 @@
21565 +
21566 +obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += dpaa2-rtc.o
21567 +
21568 +dpaa2-rtc-objs := rtc.o dprtc.o
21569 +
21570 +all:
21571 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
21572 +
21573 +clean:
21574 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
21575 --- /dev/null
21576 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
21577 @@ -0,0 +1,160 @@
21578 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
21579 + *
21580 + * Redistribution and use in source and binary forms, with or without
21581 + * modification, are permitted provided that the following conditions are met:
21582 + * * Redistributions of source code must retain the above copyright
21583 + * notice, this list of conditions and the following disclaimer.
21584 + * * Redistributions in binary form must reproduce the above copyright
21585 + * notice, this list of conditions and the following disclaimer in the
21586 + * documentation and/or other materials provided with the distribution.
21587 + * * Neither the name of the above-listed copyright holders nor the
21588 + * names of any contributors may be used to endorse or promote products
21589 + * derived from this software without specific prior written permission.
21590 + *
21591 + *
21592 + * ALTERNATIVELY, this software may be distributed under the terms of the
21593 + * GNU General Public License ("GPL") as published by the Free Software
21594 + * Foundation, either version 2 of that License or (at your option) any
21595 + * later version.
21596 + *
21597 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21598 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21599 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21600 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21601 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21602 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21603 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21604 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21605 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21606 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21607 + * POSSIBILITY OF SUCH DAMAGE.
21608 + */
21609 +#ifndef _FSL_DPRTC_CMD_H
21610 +#define _FSL_DPRTC_CMD_H
21611 +
21612 +/* DPRTC Version */
21613 +#define DPRTC_VER_MAJOR 2
21614 +#define DPRTC_VER_MINOR 0
21615 +
21616 +/* Command versioning */
21617 +#define DPRTC_CMD_BASE_VERSION 1
21618 +#define DPRTC_CMD_ID_OFFSET 4
21619 +
21620 +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
21621 +
21622 +/* Command IDs */
21623 +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
21624 +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
21625 +#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910)
21626 +#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990)
21627 +#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10)
21628 +
21629 +#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002)
21630 +#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003)
21631 +#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004)
21632 +#define DPRTC_CMDID_RESET DPRTC_CMD(0x005)
21633 +#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006)
21634 +
21635 +#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
21636 +#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
21637 +#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
21638 +#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
21639 +#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
21640 +#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
21641 +
21642 +#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0)
21643 +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
21644 +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
21645 +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
21646 +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
21647 +#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5)
21648 +#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6)
21649 +#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7)
21650 +#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8)
21651 +#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9)
21652 +#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA)
21653 +
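
DPRTC_CMD() packs the raw command id and the command version into one value: the id is shifted left by DPRTC_CMD_ID_OFFSET (4 bits) and the low nibble carries DPRTC_CMD_BASE_VERSION. Two worked examples, derived from the definitions above:

	/* DPRTC_CMDID_OPEN  = (0x810 << 4) | 1 = 0x8101 */
	/* DPRTC_CMDID_CLOSE = (0x800 << 4) | 1 = 0x8001 */
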
21654 +/* Macros for accessing command fields smaller than 1 byte */
21655 +#define DPRTC_MASK(field) \
21656 + GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
21657 + DPRTC_##field##_SHIFT)
21658 +#define dprtc_get_field(var, field) \
21659 + (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
21660 +
21661 +#pragma pack(push, 1)
21662 +struct dprtc_cmd_open {
21663 + uint32_t dprtc_id;
21664 +};
21665 +
21666 +struct dprtc_cmd_destroy {
21667 + uint32_t object_id;
21668 +};
21669 +
21670 +#define DPRTC_ENABLE_SHIFT 0
21671 +#define DPRTC_ENABLE_SIZE 1
21672 +
21673 +struct dprtc_rsp_is_enabled {
21674 + uint8_t en;
21675 +};
21676 +
21677 +struct dprtc_cmd_get_irq {
21678 + uint32_t pad;
21679 + uint8_t irq_index;
21680 +};
21681 +
21682 +struct dprtc_cmd_set_irq_enable {
21683 + uint8_t en;
21684 + uint8_t pad[3];
21685 + uint8_t irq_index;
21686 +};
21687 +
21688 +struct dprtc_rsp_get_irq_enable {
21689 + uint8_t en;
21690 +};
21691 +
21692 +struct dprtc_cmd_set_irq_mask {
21693 + uint32_t mask;
21694 + uint8_t irq_index;
21695 +};
21696 +
21697 +struct dprtc_rsp_get_irq_mask {
21698 + uint32_t mask;
21699 +};
21700 +
21701 +struct dprtc_cmd_get_irq_status {
21702 + uint32_t status;
21703 + uint8_t irq_index;
21704 +};
21705 +
21706 +struct dprtc_rsp_get_irq_status {
21707 + uint32_t status;
21708 +};
21709 +
21710 +struct dprtc_cmd_clear_irq_status {
21711 + uint32_t status;
21712 + uint8_t irq_index;
21713 +};
21714 +
21715 +struct dprtc_rsp_get_attributes {
21716 + uint32_t pad;
21717 + uint32_t id;
21718 +};
21719 +
21720 +struct dprtc_cmd_set_clock_offset {
21721 + uint64_t offset;
21722 +};
21723 +
21724 +struct dprtc_get_freq_compensation {
21725 + uint32_t freq_compensation;
21726 +};
21727 +
21728 +struct dprtc_time {
21729 + uint64_t time;
21730 +};
21731 +
21732 +struct dprtc_rsp_get_api_version {
21733 + uint16_t major;
21734 + uint16_t minor;
21735 +};
21736 +#pragma pack(pop)
21737 +#endif /* _FSL_DPRTC_CMD_H */
21738 --- /dev/null
21739 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
21740 @@ -0,0 +1,746 @@
21741 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
21742 + *
21743 + * Redistribution and use in source and binary forms, with or without
21744 + * modification, are permitted provided that the following conditions are met:
21745 + * * Redistributions of source code must retain the above copyright
21746 + * notice, this list of conditions and the following disclaimer.
21747 + * * Redistributions in binary form must reproduce the above copyright
21748 + * notice, this list of conditions and the following disclaimer in the
21749 + * documentation and/or other materials provided with the distribution.
21750 + * * Neither the name of the above-listed copyright holders nor the
21751 + * names of any contributors may be used to endorse or promote products
21752 + * derived from this software without specific prior written permission.
21753 + *
21754 + *
21755 + * ALTERNATIVELY, this software may be distributed under the terms of the
21756 + * GNU General Public License ("GPL") as published by the Free Software
21757 + * Foundation, either version 2 of that License or (at your option) any
21758 + * later version.
21759 + *
21760 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21761 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21762 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21763 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21764 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21765 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21766 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21767 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21768 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21769 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21770 + * POSSIBILITY OF SUCH DAMAGE.
21771 + */
21772 +#include "../../fsl-mc/include/mc-sys.h"
21773 +#include "../../fsl-mc/include/mc-cmd.h"
21774 +#include "dprtc.h"
21775 +#include "dprtc-cmd.h"
21776 +
21777 +/**
21778 + * dprtc_open() - Open a control session for the specified object.
21779 + * @mc_io: Pointer to MC portal's I/O object
21780 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21781 + * @dprtc_id: DPRTC unique ID
21782 + * @token: Returned token; use in subsequent API calls
21783 + *
21784 + * This function can be used to open a control session for an
21785 + * already created object; an object may have been declared in
21786 + * the DPL or by calling the dprtc_create function.
21787 + * This function returns a unique authentication token,
21788 + * associated with the specific object ID and the specific MC
21789 + * portal; this token must be used in all subsequent commands for
21790 + * this specific object
21791 + *
21792 + * Return: '0' on Success; Error code otherwise.
21793 + */
21794 +int dprtc_open(struct fsl_mc_io *mc_io,
21795 + uint32_t cmd_flags,
21796 + int dprtc_id,
21797 + uint16_t *token)
21798 +{
21799 + struct dprtc_cmd_open *cmd_params;
21800 + struct mc_command cmd = { 0 };
21801 + int err;
21802 +
21803 + /* prepare command */
21804 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
21805 + cmd_flags,
21806 + 0);
21807 + cmd_params = (struct dprtc_cmd_open *)cmd.params;
21808 + cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
21809 +
21810 + /* send command to mc*/
21811 + err = mc_send_command(mc_io, &cmd);
21812 + if (err)
21813 + return err;
21814 +
21815 + /* retrieve response parameters */
21816 + *token = mc_cmd_hdr_read_token(&cmd);
21817 +
21818 + return err;
21819 +}
21820 +
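
As the kernel-doc above notes, the token returned by dprtc_open() authenticates every subsequent command for this object and must eventually be released with dprtc_close(). A minimal usage sketch, assuming an MC portal and a known DPRTC id:

	uint16_t token;
	int err;

	err = dprtc_open(mc_io, 0, dprtc_id, &token);
	if (err)
		return err;

	/* ... issue DPRTC commands against 'token' here ... */

	dprtc_close(mc_io, 0, token);
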
21821 +/**
21822 + * dprtc_close() - Close the control session of the object
21823 + * @mc_io: Pointer to MC portal's I/O object
21824 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21825 + * @token: Token of DPRTC object
21826 + *
21827 + * After this function is called, no further operations are
21828 + * allowed on the object without opening a new control session.
21829 + *
21830 + * Return: '0' on Success; Error code otherwise.
21831 + */
21832 +int dprtc_close(struct fsl_mc_io *mc_io,
21833 + uint32_t cmd_flags,
21834 + uint16_t token)
21835 +{
21836 + struct mc_command cmd = { 0 };
21837 +
21838 + /* prepare command */
21839 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
21840 + token);
21841 +
21842 + /* send command to mc*/
21843 + return mc_send_command(mc_io, &cmd);
21844 +}
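Taken together, dprtc_open() and dprtc_close() bracket every use of the API: the token returned by open identifies the object in each later command and must be released with close. A minimal caller sketch, for illustration only; it assumes dprtc.h from this patch is included and that the caller already owns an initialized struct fsl_mc_io portal and knows the DPRTC object id:

static int dprtc_example_read_clock(struct fsl_mc_io *mc_io, int dprtc_id,
				    uint64_t *ns)
{
	uint16_t token;
	int err;

	/* open a control session; the returned token authenticates
	 * every subsequent command for this object
	 */
	err = dprtc_open(mc_io, 0, dprtc_id, &token);
	if (err)
		return err;

	/* any runtime command, e.g. reading the counter */
	err = dprtc_get_time(mc_io, 0, token, ns);

	/* always release the session, even if the command failed */
	dprtc_close(mc_io, 0, token);

	return err;
}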
21845 +
21846 +/**
21847 + * dprtc_create() - Create the DPRTC object.
21848 + * @mc_io: Pointer to MC portal's I/O object
21849 + * @dprc_token: Parent container token; '0' for default container
21850 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21851 + * @cfg: Configuration structure
21852 + * @obj_id: Returned object id
21853 + *
21854 + * Create the DPRTC object, allocate required resources and
21855 + * perform required initialization.
21856 + *
21857 + * The function accepts an authentication token of a parent
21858 + * container that this object should be assigned to. The token
21859 + * can be '0', in which case the object is assigned to the default container.
21860 + * The newly created object can be opened with the returned
21861 + * object id and using the container's associated tokens and MC portals.
21862 + *
21863 + * Return: '0' on Success; Error code otherwise.
21864 + */
21865 +int dprtc_create(struct fsl_mc_io *mc_io,
21866 + uint16_t dprc_token,
21867 + uint32_t cmd_flags,
21868 + const struct dprtc_cfg *cfg,
21869 + uint32_t *obj_id)
21870 +{
21871 + struct mc_command cmd = { 0 };
21872 + int err;
21873 +
21874 + (void)(cfg); /* unused */
21875 +
21876 + /* prepare command */
21877 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
21878 + cmd_flags,
21879 + dprc_token);
21880 +
21881 + /* send command to mc*/
21882 + err = mc_send_command(mc_io, &cmd);
21883 + if (err)
21884 + return err;
21885 +
21886 + /* retrieve response parameters */
21887 + *obj_id = mc_cmd_read_object_id(&cmd);
21888 +
21889 + return 0;
21890 +}
21891 +
21892 +/**
21893 + * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
21894 + * @mc_io: Pointer to MC portal's I/O object
21895 + * @dprc_token: Parent container token; '0' for default container
21896 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21897 + * @object_id: The object id; it must be a valid id within the container that
21898 + *		created this object.
21899 + *
21900 + * The function accepts the authentication token of the parent container that
21901 + * created the object (not the one that currently owns the object). The object
21902 + * is searched within parent using the provided 'object_id'.
21903 + * All tokens to the object must be closed before calling destroy.
21904 + *
21905 + * Return: '0' on Success; error code otherwise.
21906 + */
21907 +int dprtc_destroy(struct fsl_mc_io *mc_io,
21908 + uint16_t dprc_token,
21909 + uint32_t cmd_flags,
21910 + uint32_t object_id)
21911 +{
21912 + struct dprtc_cmd_destroy *cmd_params;
21913 + struct mc_command cmd = { 0 };
21914 +
21915 + /* prepare command */
21916 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
21917 + cmd_flags,
21918 + dprc_token);
21919 + cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
21920 + cmd_params->object_id = cpu_to_le32(object_id);
21921 +
21922 + /* send command to mc*/
21923 + return mc_send_command(mc_io, &cmd);
21924 +}
21925 +
21926 +int dprtc_enable(struct fsl_mc_io *mc_io,
21927 + uint32_t cmd_flags,
21928 + uint16_t token)
21929 +{
21930 + struct mc_command cmd = { 0 };
21931 +
21932 + /* prepare command */
21933 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
21934 + token);
21935 +
21936 + /* send command to mc*/
21937 + return mc_send_command(mc_io, &cmd);
21938 +}
21939 +
21940 +int dprtc_disable(struct fsl_mc_io *mc_io,
21941 + uint32_t cmd_flags,
21942 + uint16_t token)
21943 +{
21944 + struct mc_command cmd = { 0 };
21945 +
21946 + /* prepare command */
21947 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
21948 + cmd_flags,
21949 + token);
21950 +
21951 + /* send command to mc*/
21952 + return mc_send_command(mc_io, &cmd);
21953 +}
21954 +
21955 +int dprtc_is_enabled(struct fsl_mc_io *mc_io,
21956 + uint32_t cmd_flags,
21957 + uint16_t token,
21958 + int *en)
21959 +{
21960 + struct dprtc_rsp_is_enabled *rsp_params;
21961 + struct mc_command cmd = { 0 };
21962 + int err;
21963 +
21964 + /* prepare command */
21965 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
21966 + token);
21967 +
21968 + /* send command to mc*/
21969 + err = mc_send_command(mc_io, &cmd);
21970 + if (err)
21971 + return err;
21972 +
21973 + /* retrieve response parameters */
21974 + rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
21975 + *en = dprtc_get_field(rsp_params->en, ENABLE);
21976 +
21977 + return 0;
21978 +}
21979 +
21980 +int dprtc_reset(struct fsl_mc_io *mc_io,
21981 + uint32_t cmd_flags,
21982 + uint16_t token)
21983 +{
21984 + struct mc_command cmd = { 0 };
21985 +
21986 + /* prepare command */
21987 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
21988 + cmd_flags,
21989 + token);
21990 +
21991 + /* send command to mc*/
21992 + return mc_send_command(mc_io, &cmd);
21993 +}
21994 +
21995 +/**
21996 + * dprtc_set_irq_enable() - Set overall interrupt state.
21997 + * @mc_io: Pointer to MC portal's I/O object
21998 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21999 + * @token: Token of DPRTC object
22000 + * @irq_index: The interrupt index to configure
22001 + * @en: Interrupt state - enable = 1, disable = 0
22002 + *
22003 + * Allows GPP software to control when interrupts are generated.
22004 + * Each interrupt can have up to 32 causes. The enable/disable controls the
22005 + * overall interrupt state; if the interrupt is disabled, none of the causes
22006 + * can trigger an interrupt.
22007 + *
22008 + * Return: '0' on Success; Error code otherwise.
22009 + */
22010 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
22011 + uint32_t cmd_flags,
22012 + uint16_t token,
22013 + uint8_t irq_index,
22014 + uint8_t en)
22015 +{
22016 + struct dprtc_cmd_set_irq_enable *cmd_params;
22017 + struct mc_command cmd = { 0 };
22018 +
22019 + /* prepare command */
22020 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
22021 + cmd_flags,
22022 + token);
22023 + cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
22024 + cmd_params->irq_index = irq_index;
22025 + cmd_params->en = en;
22026 +
22027 + /* send command to mc*/
22028 + return mc_send_command(mc_io, &cmd);
22029 +}
22030 +
22031 +/**
22032 + * dprtc_get_irq_enable() - Get overall interrupt state
22033 + * @mc_io: Pointer to MC portal's I/O object
22034 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22035 + * @token: Token of DPRTC object
22036 + * @irq_index: The interrupt index to configure
22037 + * @en: Returned interrupt state - enable = 1, disable = 0
22038 + *
22039 + * Return: '0' on Success; Error code otherwise.
22040 + */
22041 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
22042 + uint32_t cmd_flags,
22043 + uint16_t token,
22044 + uint8_t irq_index,
22045 + uint8_t *en)
22046 +{
22047 + struct dprtc_rsp_get_irq_enable *rsp_params;
22048 + struct dprtc_cmd_get_irq *cmd_params;
22049 + struct mc_command cmd = { 0 };
22050 + int err;
22051 +
22052 + /* prepare command */
22053 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
22054 + cmd_flags,
22055 + token);
22056 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22057 + cmd_params->irq_index = irq_index;
22058 +
22059 + /* send command to mc*/
22060 + err = mc_send_command(mc_io, &cmd);
22061 + if (err)
22062 + return err;
22063 +
22064 + /* retrieve response parameters */
22065 + rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
22066 + *en = rsp_params->en;
22067 +
22068 + return 0;
22069 +}
22070 +
22071 +/**
22072 + * dprtc_set_irq_mask() - Set interrupt mask.
22073 + * @mc_io: Pointer to MC portal's I/O object
22074 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22075 + * @token: Token of DPRTC object
22076 + * @irq_index: The interrupt index to configure
22077 + * @mask: Event mask to trigger interrupt;
22078 + * each bit:
22079 + * 0 = ignore event
22080 + * 1 = consider event for asserting IRQ
22081 + *
22082 + * Every interrupt can have up to 32 causes and the interrupt model supports
22083 + * masking/unmasking each cause independently.
22084 + *
22085 + * Return: '0' on Success; Error code otherwise.
22086 + */
22087 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
22088 + uint32_t cmd_flags,
22089 + uint16_t token,
22090 + uint8_t irq_index,
22091 + uint32_t mask)
22092 +{
22093 + struct dprtc_cmd_set_irq_mask *cmd_params;
22094 + struct mc_command cmd = { 0 };
22095 +
22096 + /* prepare command */
22097 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
22098 + cmd_flags,
22099 + token);
22100 + cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
22101 + cmd_params->mask = cpu_to_le32(mask);
22102 + cmd_params->irq_index = irq_index;
22103 +
22104 + /* send command to mc*/
22105 + return mc_send_command(mc_io, &cmd);
22106 +}
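The enable flag and the per-cause mask act at two levels: a cause can only raise the interrupt when its mask bit is set and the overall interrupt is enabled. A hedged sketch of arming the alarm cause, using the DPRTC_IRQ_INDEX and DPRTC_EVENT_ALARM constants defined in dprtc.h later in this patch (portal and token obtained as in the earlier sketch):

static int dprtc_example_arm_alarm_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
	int err;

	/* unmask only the alarm cause on interrupt index 0 */
	err = dprtc_set_irq_mask(mc_io, 0, token, DPRTC_IRQ_INDEX,
				 DPRTC_EVENT_ALARM);
	if (err)
		return err;

	/* then switch on the interrupt as a whole */
	return dprtc_set_irq_enable(mc_io, 0, token, DPRTC_IRQ_INDEX, 1);
}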
22107 +
22108 +/**
22109 + * dprtc_get_irq_mask() - Get interrupt mask.
22110 + * @mc_io: Pointer to MC portal's I/O object
22111 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22112 + * @token: Token of DPRTC object
22113 + * @irq_index: The interrupt index to configure
22114 + * @mask: Returned event mask to trigger interrupt
22115 + *
22116 + * Every interrupt can have up to 32 causes and the interrupt model supports
22117 + * masking/unmasking each cause independently.
22118 + *
22119 + * Return: '0' on Success; Error code otherwise.
22120 + */
22121 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
22122 + uint32_t cmd_flags,
22123 + uint16_t token,
22124 + uint8_t irq_index,
22125 + uint32_t *mask)
22126 +{
22127 + struct dprtc_rsp_get_irq_mask *rsp_params;
22128 + struct dprtc_cmd_get_irq *cmd_params;
22129 + struct mc_command cmd = { 0 };
22130 + int err;
22131 +
22132 + /* prepare command */
22133 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
22134 + cmd_flags,
22135 + token);
22136 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22137 + cmd_params->irq_index = irq_index;
22138 +
22139 + /* send command to mc*/
22140 + err = mc_send_command(mc_io, &cmd);
22141 + if (err)
22142 + return err;
22143 +
22144 + /* retrieve response parameters */
22145 + rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
22146 + *mask = le32_to_cpu(rsp_params->mask);
22147 +
22148 + return 0;
22149 +}
22150 +
22151 +/**
22152 + * dprtc_get_irq_status() - Get the current status of any pending interrupts.
22153 + *
22154 + * @mc_io: Pointer to MC portal's I/O object
22155 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22156 + * @token: Token of DPRTC object
22157 + * @irq_index: The interrupt index to configure
22158 + * @status: Returned interrupts status - one bit per cause:
22159 + * 0 = no interrupt pending
22160 + * 1 = interrupt pending
22161 + *
22162 + * Return: '0' on Success; Error code otherwise.
22163 + */
22164 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
22165 + uint32_t cmd_flags,
22166 + uint16_t token,
22167 + uint8_t irq_index,
22168 + uint32_t *status)
22169 +{
22170 + struct dprtc_cmd_get_irq_status *cmd_params;
22171 + struct dprtc_rsp_get_irq_status *rsp_params;
22172 + struct mc_command cmd = { 0 };
22173 + int err;
22174 +
22175 + /* prepare command */
22176 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
22177 + cmd_flags,
22178 + token);
22179 + cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
22180 + cmd_params->status = cpu_to_le32(*status);
22181 + cmd_params->irq_index = irq_index;
22182 +
22183 + /* send command to mc*/
22184 + err = mc_send_command(mc_io, &cmd);
22185 + if (err)
22186 + return err;
22187 +
22188 + /* retrieve response parameters */
22189 + rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
22190 +	*status = le32_to_cpu(rsp_params->status);
22191 +
22192 + return 0;
22193 +}
22194 +
22195 +/**
22196 + * dprtc_clear_irq_status() - Clear a pending interrupt's status
22197 + *
22198 + * @mc_io: Pointer to MC portal's I/O object
22199 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22200 + * @token: Token of DPRTC object
22201 + * @irq_index: The interrupt index to configure
22202 + * @status: Bits to clear (W1C) - one bit per cause:
22203 + * 0 = don't change
22204 + * 1 = clear status bit
22205 + *
22206 + * Return: '0' on Success; Error code otherwise.
22207 + */
22208 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
22209 + uint32_t cmd_flags,
22210 + uint16_t token,
22211 + uint8_t irq_index,
22212 + uint32_t status)
22213 +{
22214 + struct dprtc_cmd_clear_irq_status *cmd_params;
22215 + struct mc_command cmd = { 0 };
22216 +
22217 + /* prepare command */
22218 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
22219 + cmd_flags,
22220 + token);
22221 + cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
22222 + cmd_params->irq_index = irq_index;
22223 + cmd_params->status = cpu_to_le32(status);
22224 +
22225 + /* send command to mc*/
22226 + return mc_send_command(mc_io, &cmd);
22227 +}
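Since the status register is write-one-to-clear, a handler typically reads the pending causes and writes the same bits back. A sketch under the same assumptions as above; note that dprtc_get_irq_status() also copies the value passed in into the command, so the caller initializes it to the causes it cares about:

static int dprtc_example_handle_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t status = DPRTC_EVENT_ALARM | DPRTC_EVENT_PPS;
	int err;

	err = dprtc_get_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX, &status);
	if (err)
		return err;

	if (status & DPRTC_EVENT_ALARM) {
		/* alarm fired - device-specific handling would go here */
	}

	/* acknowledge exactly the causes that were reported (W1C) */
	return dprtc_clear_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX, status);
}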
22228 +
22229 +/**
22230 + * dprtc_get_attributes() - Retrieve DPRTC attributes.
22231 + *
22232 + * @mc_io: Pointer to MC portal's I/O object
22233 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22234 + * @token: Token of DPRTC object
22235 + * @attr: Returned object's attributes
22236 + *
22237 + * Return: '0' on Success; Error code otherwise.
22238 + */
22239 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
22240 + uint32_t cmd_flags,
22241 + uint16_t token,
22242 + struct dprtc_attr *attr)
22243 +{
22244 + struct dprtc_rsp_get_attributes *rsp_params;
22245 + struct mc_command cmd = { 0 };
22246 + int err;
22247 +
22248 + /* prepare command */
22249 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
22250 + cmd_flags,
22251 + token);
22252 +
22253 + /* send command to mc*/
22254 + err = mc_send_command(mc_io, &cmd);
22255 + if (err)
22256 + return err;
22257 +
22258 + /* retrieve response parameters */
22259 + rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
22260 + attr->id = le32_to_cpu(rsp_params->id);
22261 +
22262 + return 0;
22263 +}
22264 +
22265 +/**
22266 + * dprtc_set_clock_offset() - Sets the clock's offset
22267 + * (usually relative to another clock).
22268 + *
22269 + * @mc_io: Pointer to MC portal's I/O object
22270 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22271 + * @token: Token of DPRTC object
22272 + * @offset: New clock offset (in nanoseconds).
22273 + *
22274 + * Return: '0' on Success; Error code otherwise.
22275 + */
22276 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
22277 + uint32_t cmd_flags,
22278 + uint16_t token,
22279 + int64_t offset)
22280 +{
22281 + struct dprtc_cmd_set_clock_offset *cmd_params;
22282 + struct mc_command cmd = { 0 };
22283 +
22284 + /* prepare command */
22285 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
22286 + cmd_flags,
22287 + token);
22288 + cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
22289 + cmd_params->offset = cpu_to_le64(offset);
22290 +
22291 + /* send command to mc*/
22292 + return mc_send_command(mc_io, &cmd);
22293 +}
22294 +
22295 +/**
22296 + * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
22297 + *
22298 + * @mc_io: Pointer to MC portal's I/O object
22299 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22300 + * @token: Token of DPRTC object
22301 + * @freq_compensation: The new frequency compensation value to set.
22302 + *
22303 + * Return: '0' on Success; Error code otherwise.
22304 + */
22305 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
22306 + uint32_t cmd_flags,
22307 + uint16_t token,
22308 + uint32_t freq_compensation)
22309 +{
22310 + struct dprtc_get_freq_compensation *cmd_params;
22311 + struct mc_command cmd = { 0 };
22312 +
22313 + /* prepare command */
22314 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
22315 + cmd_flags,
22316 + token);
22317 + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
22318 + cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
22319 +
22320 + /* send command to mc*/
22321 + return mc_send_command(mc_io, &cmd);
22322 +}
22323 +
22324 +/**
22325 + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
22326 + *
22327 + * @mc_io: Pointer to MC portal's I/O object
22328 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22329 + * @token: Token of DPRTC object
22330 + * @freq_compensation: Frequency compensation value
22331 + *
22332 + * Return: '0' on Success; Error code otherwise.
22333 + */
22334 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
22335 + uint32_t cmd_flags,
22336 + uint16_t token,
22337 + uint32_t *freq_compensation)
22338 +{
22339 + struct dprtc_get_freq_compensation *rsp_params;
22340 + struct mc_command cmd = { 0 };
22341 + int err;
22342 +
22343 + /* prepare command */
22344 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
22345 + cmd_flags,
22346 + token);
22347 +
22348 + /* send command to mc*/
22349 + err = mc_send_command(mc_io, &cmd);
22350 + if (err)
22351 + return err;
22352 +
22353 + /* retrieve response parameters */
22354 + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
22355 + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
22356 +
22357 + return 0;
22358 +}
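The set/get pair above is what a parts-per-billion frequency adjustment boils down to: scale the current compensation value by |ppb| / 10^9 and add or subtract the difference, exactly as the PTP adjfreq callback in rtc.c later in this patch does. A standalone sketch with hypothetical naming:

static int dprtc_example_adjust_ppb(struct fsl_mc_io *mc_io, uint16_t token,
				    int32_t ppb)
{
	uint32_t base, diff;
	uint64_t adj;
	int err;

	err = dprtc_get_freq_compensation(mc_io, 0, token, &base);
	if (err)
		return err;

	/* e.g. ppb = 100000 (100 ppm) scales the base value by 1/10000 */
	adj = (uint64_t)base * (ppb < 0 ? -ppb : ppb);
	diff = div_u64(adj, 1000000000);

	return dprtc_set_freq_compensation(mc_io, 0, token,
					   ppb < 0 ? base - diff : base + diff);
}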
22359 +
22360 +/**
22361 + * dprtc_get_time() - Returns the current RTC time.
22362 + *
22363 + * @mc_io: Pointer to MC portal's I/O object
22364 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22365 + * @token: Token of DPRTC object
22366 + * @time: Current RTC time.
22367 + *
22368 + * Return: '0' on Success; Error code otherwise.
22369 + */
22370 +int dprtc_get_time(struct fsl_mc_io *mc_io,
22371 + uint32_t cmd_flags,
22372 + uint16_t token,
22373 + uint64_t *time)
22374 +{
22375 + struct dprtc_time *rsp_params;
22376 + struct mc_command cmd = { 0 };
22377 + int err;
22378 +
22379 + /* prepare command */
22380 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
22381 + cmd_flags,
22382 + token);
22383 +
22384 + /* send command to mc*/
22385 + err = mc_send_command(mc_io, &cmd);
22386 + if (err)
22387 + return err;
22388 +
22389 + /* retrieve response parameters */
22390 + rsp_params = (struct dprtc_time *)cmd.params;
22391 + *time = le64_to_cpu(rsp_params->time);
22392 +
22393 + return 0;
22394 +}
22395 +
22396 +/**
22397 + * dprtc_set_time() - Updates current RTC time.
22398 + *
22399 + * @mc_io: Pointer to MC portal's I/O object
22400 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22401 + * @token: Token of DPRTC object
22402 + * @time: New RTC time.
22403 + *
22404 + * Return: '0' on Success; Error code otherwise.
22405 + */
22406 +int dprtc_set_time(struct fsl_mc_io *mc_io,
22407 + uint32_t cmd_flags,
22408 + uint16_t token,
22409 + uint64_t time)
22410 +{
22411 + struct dprtc_time *cmd_params;
22412 + struct mc_command cmd = { 0 };
22413 +
22414 + /* prepare command */
22415 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
22416 + cmd_flags,
22417 + token);
22418 + cmd_params = (struct dprtc_time *)cmd.params;
22419 + cmd_params->time = cpu_to_le64(time);
22420 +
22421 + /* send command to mc*/
22422 + return mc_send_command(mc_io, &cmd);
22423 +}
22424 +
22425 +/**
22426 + * dprtc_set_alarm() - Defines and sets alarm.
22427 + *
22428 + * @mc_io: Pointer to MC portal's I/O object
22429 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22430 + * @token: Token of DPRTC object
22431 + * @time: In nanoseconds, the time when the alarm
22432 + * should go off - must be a multiple of
22433 + * 1 microsecond
22434 + *
22435 + * Return: '0' on Success; Error code otherwise.
22436 + */
22437 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
22438 + uint32_t cmd_flags,
22439 + uint16_t token, uint64_t time)
22440 +{
22441 + struct dprtc_time *cmd_params;
22442 + struct mc_command cmd = { 0 };
22443 +
22444 + /* prepare command */
22445 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
22446 + cmd_flags,
22447 + token);
22448 + cmd_params = (struct dprtc_time *)cmd.params;
22449 + cmd_params->time = cpu_to_le64(time);
22450 +
22451 + /* send command to mc*/
22452 + return mc_send_command(mc_io, &cmd);
22453 +}
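Because the alarm time is absolute and must be a multiple of 1 microsecond, a caller would normally read the current time, add the desired delay and round down before arming it. Illustrative sketch only, same assumptions as the earlier examples:

static int dprtc_example_alarm_in(struct fsl_mc_io *mc_io, uint16_t token,
				  uint64_t delay_ns)
{
	uint64_t target;
	uint32_t rem;
	int err;

	err = dprtc_get_time(mc_io, 0, token, &target);
	if (err)
		return err;

	/* absolute target, rounded down to a 1 us boundary as required */
	target += delay_ns;
	div_u64_rem(target, 1000, &rem);
	target -= rem;

	return dprtc_set_alarm(mc_io, 0, token, target);
}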
22454 +
22455 +/**
22456 + * dprtc_get_api_version() - Get Data Path Real Time Counter API version
22457 + * @mc_io: Pointer to MC portal's I/O object
22458 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22459 + * @major_ver: Major version of data path real time counter API
22460 + * @minor_ver: Minor version of data path real time counter API
22461 + *
22462 + * Return: '0' on Success; Error code otherwise.
22463 + */
22464 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
22465 + uint32_t cmd_flags,
22466 + uint16_t *major_ver,
22467 + uint16_t *minor_ver)
22468 +{
22469 + struct dprtc_rsp_get_api_version *rsp_params;
22470 + struct mc_command cmd = { 0 };
22471 + int err;
22472 +
22473 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
22474 + cmd_flags,
22475 + 0);
22476 +
22477 + err = mc_send_command(mc_io, &cmd);
22478 + if (err)
22479 + return err;
22480 +
22481 + rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
22482 + *major_ver = le16_to_cpu(rsp_params->major);
22483 + *minor_ver = le16_to_cpu(rsp_params->minor);
22484 +
22485 + return 0;
22486 +}
22487 --- /dev/null
22488 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
22489 @@ -0,0 +1,172 @@
22490 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
22491 + *
22492 + * Redistribution and use in source and binary forms, with or without
22493 + * modification, are permitted provided that the following conditions are met:
22494 + * * Redistributions of source code must retain the above copyright
22495 + * notice, this list of conditions and the following disclaimer.
22496 + * * Redistributions in binary form must reproduce the above copyright
22497 + * notice, this list of conditions and the following disclaimer in the
22498 + * documentation and/or other materials provided with the distribution.
22499 + * * Neither the name of the above-listed copyright holders nor the
22500 + * names of any contributors may be used to endorse or promote products
22501 + * derived from this software without specific prior written permission.
22502 + *
22503 + *
22504 + * ALTERNATIVELY, this software may be distributed under the terms of the
22505 + * GNU General Public License ("GPL") as published by the Free Software
22506 + * Foundation, either version 2 of that License or (at your option) any
22507 + * later version.
22508 + *
22509 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22510 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22511 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22512 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22513 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22514 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22515 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22516 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22517 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22518 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22519 + * POSSIBILITY OF SUCH DAMAGE.
22520 + */
22521 +#ifndef __FSL_DPRTC_H
22522 +#define __FSL_DPRTC_H
22523 +
22524 +/* Data Path Real Time Counter API
22525 + * Contains initialization APIs and runtime control APIs for RTC
22526 + */
22527 +
22528 +struct fsl_mc_io;
22529 +
22530 +/**
22531 + * Number of IRQs
22532 + */
22533 +#define DPRTC_MAX_IRQ_NUM 1
22534 +#define DPRTC_IRQ_INDEX 0
22535 +
22536 +/**
22537 + * Interrupt event masks:
22538 + */
22539 +
22540 +/**
22541 + * Interrupt event mask indicating an alarm event has occurred
22542 + */
22543 +#define DPRTC_EVENT_ALARM 0x40000000
22544 +/**
22545 + * Interrupt event mask indicating a periodic pulse event has occurred
22546 + */
22547 +#define DPRTC_EVENT_PPS 0x08000000
22548 +
22549 +int dprtc_open(struct fsl_mc_io *mc_io,
22550 + uint32_t cmd_flags,
22551 + int dprtc_id,
22552 + uint16_t *token);
22553 +
22554 +int dprtc_close(struct fsl_mc_io *mc_io,
22555 + uint32_t cmd_flags,
22556 + uint16_t token);
22557 +
22558 +/**
22559 + * struct dprtc_cfg - Structure representing DPRTC configuration
22560 + * @options:	placeholder; not used by dprtc_create()
22561 + */
22562 +struct dprtc_cfg {
22563 + uint32_t options;
22564 +};
22565 +
22566 +int dprtc_create(struct fsl_mc_io *mc_io,
22567 + uint16_t dprc_token,
22568 + uint32_t cmd_flags,
22569 + const struct dprtc_cfg *cfg,
22570 + uint32_t *obj_id);
22571 +
22572 +int dprtc_destroy(struct fsl_mc_io *mc_io,
22573 + uint16_t dprc_token,
22574 + uint32_t cmd_flags,
22575 + uint32_t object_id);
22576 +
22577 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
22578 + uint32_t cmd_flags,
22579 + uint16_t token,
22580 + int64_t offset);
22581 +
22582 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
22583 + uint32_t cmd_flags,
22584 + uint16_t token,
22585 + uint32_t freq_compensation);
22586 +
22587 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
22588 + uint32_t cmd_flags,
22589 + uint16_t token,
22590 + uint32_t *freq_compensation);
22591 +
22592 +int dprtc_get_time(struct fsl_mc_io *mc_io,
22593 + uint32_t cmd_flags,
22594 + uint16_t token,
22595 + uint64_t *time);
22596 +
22597 +int dprtc_set_time(struct fsl_mc_io *mc_io,
22598 + uint32_t cmd_flags,
22599 + uint16_t token,
22600 + uint64_t time);
22601 +
22602 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
22603 + uint32_t cmd_flags,
22604 + uint16_t token,
22605 + uint64_t time);
22606 +
22607 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
22608 + uint32_t cmd_flags,
22609 + uint16_t token,
22610 + uint8_t irq_index,
22611 + uint8_t en);
22612 +
22613 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
22614 + uint32_t cmd_flags,
22615 + uint16_t token,
22616 + uint8_t irq_index,
22617 + uint8_t *en);
22618 +
22619 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
22620 + uint32_t cmd_flags,
22621 + uint16_t token,
22622 + uint8_t irq_index,
22623 + uint32_t mask);
22624 +
22625 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
22626 + uint32_t cmd_flags,
22627 + uint16_t token,
22628 + uint8_t irq_index,
22629 + uint32_t *mask);
22630 +
22631 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
22632 + uint32_t cmd_flags,
22633 + uint16_t token,
22634 + uint8_t irq_index,
22635 + uint32_t *status);
22636 +
22637 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
22638 + uint32_t cmd_flags,
22639 + uint16_t token,
22640 + uint8_t irq_index,
22641 + uint32_t status);
22642 +
22643 +/**
22644 + * struct dprtc_attr - Structure representing DPRTC attributes
22645 + * @id: DPRTC object ID
22646 + */
22647 +struct dprtc_attr {
22648 + int id;
22649 +};
22650 +
22651 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
22652 + uint32_t cmd_flags,
22653 + uint16_t token,
22654 + struct dprtc_attr *attr);
22655 +
22656 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
22657 + uint32_t cmd_flags,
22658 + uint16_t *major_ver,
22659 + uint16_t *minor_ver);
22660 +
22661 +#endif /* __FSL_DPRTC_H */
22662 --- /dev/null
22663 +++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
22664 @@ -0,0 +1,243 @@
22665 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
22666 + *
22667 + * Redistribution and use in source and binary forms, with or without
22668 + * modification, are permitted provided that the following conditions are met:
22669 + * * Redistributions of source code must retain the above copyright
22670 + * notice, this list of conditions and the following disclaimer.
22671 + * * Redistributions in binary form must reproduce the above copyright
22672 + * notice, this list of conditions and the following disclaimer in the
22673 + * documentation and/or other materials provided with the distribution.
22674 + * * Neither the name of the above-listed copyright holders nor the
22675 + * names of any contributors may be used to endorse or promote products
22676 + * derived from this software without specific prior written permission.
22677 + *
22678 + *
22679 + * ALTERNATIVELY, this software may be distributed under the terms of the
22680 + * GNU General Public License ("GPL") as published by the Free Software
22681 + * Foundation, either version 2 of that License or (at your option) any
22682 + * later version.
22683 + *
22684 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22685 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22686 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22687 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22688 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22689 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22690 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22691 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22692 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22693 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22694 + * POSSIBILITY OF SUCH DAMAGE.
22695 + */
22696 +
22697 +#include <linux/module.h>
22698 +#include <linux/ptp_clock_kernel.h>
22699 +
22700 +#include "../../fsl-mc/include/mc.h"
22701 +#include "../../fsl-mc/include/mc-sys.h"
22702 +
22703 +#include "dprtc.h"
22704 +#include "dprtc-cmd.h"
22705 +
22706 +#define N_EXT_TS 2
22707 +
22708 +static struct ptp_clock *clock;
22709 +static struct fsl_mc_device *rtc_mc_dev;
22710 +static u32 freq_compensation;
22711 +
22712 +/* PTP clock operations */
22713 +static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
22714 +{
22715 + u64 adj;
22716 + u32 diff, tmr_add;
22717 + int neg_adj = 0;
22718 + int err = 0;
22719 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22720 + struct device *dev = &mc_dev->dev;
22721 +
22722 + if (ppb < 0) {
22723 + neg_adj = 1;
22724 + ppb = -ppb;
22725 + }
22726 +
22727 +	tmr_add = freq_compensation;
22728 + adj = tmr_add;
22729 + adj *= ppb;
22730 + diff = div_u64(adj, 1000000000ULL);
22731 +
22732 + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
22733 +
22734 + err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
22735 + mc_dev->mc_handle, tmr_add);
22736 + if (err)
22737 + dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
22738 +	return err;
22739 +}
22740 +
22741 +static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
22742 +{
22743 +	u64 now;
22744 + int err = 0;
22745 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22746 + struct device *dev = &mc_dev->dev;
22747 +
22748 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
22749 + if (err) {
22750 + dev_err(dev, "dprtc_get_time err %d\n", err);
22751 +		return err;
22752 + }
22753 +
22754 + now += delta;
22755 +
22756 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
22757 + if (err) {
22758 + dev_err(dev, "dprtc_set_time err %d\n", err);
22759 +		return err;
22760 + }
22761 + return 0;
22762 +}
22763 +
22764 +static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
22765 +{
22766 + u64 ns;
22767 + u32 remainder;
22768 + int err = 0;
22769 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22770 + struct device *dev = &mc_dev->dev;
22771 +
22772 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
22773 + if (err) {
22774 + dev_err(dev, "dprtc_get_time err %d\n", err);
22775 +		return err;
22776 + }
22777 +
22778 + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
22779 + ts->tv_nsec = remainder;
22780 + return 0;
22781 +}
22782 +
22783 +static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
22784 +			     const struct timespec64 *ts)
22785 +{
22786 + u64 ns;
22787 + int err = 0;
22788 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22789 + struct device *dev = &mc_dev->dev;
22790 +
22791 + ns = ts->tv_sec * 1000000000ULL;
22792 + ns += ts->tv_nsec;
22793 +
22794 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
22795 + if (err)
22796 + dev_err(dev, "dprtc_set_time err %d\n", err);
22797 +	return err;
22798 +}
22799 +
22800 +static struct ptp_clock_info ptp_dpaa2_caps = {
22801 + .owner = THIS_MODULE,
22802 + .name = "dpaa2 clock",
22803 + .max_adj = 512000,
22804 + .n_alarm = 0,
22805 + .n_ext_ts = N_EXT_TS,
22806 + .n_per_out = 0,
22807 + .n_pins = 0,
22808 + .pps = 1,
22809 + .adjfreq = ptp_dpaa2_adjfreq,
22810 + .adjtime = ptp_dpaa2_adjtime,
22811 + .gettime64 = ptp_dpaa2_gettime,
22812 + .settime64 = ptp_dpaa2_settime,
22813 +};
22814 +
22815 +static int rtc_probe(struct fsl_mc_device *mc_dev)
22816 +{
22817 + struct device *dev;
22818 + int err = 0;
22819 + int dpaa2_phc_index;
22820 + u32 tmr_add = 0;
22821 +
22822 + if (!mc_dev)
22823 + return -EFAULT;
22824 +
22825 + dev = &mc_dev->dev;
22826 +
22827 + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
22828 + if (unlikely(err)) {
22829 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
22830 + goto err_exit;
22831 + }
22832 + if (!mc_dev->mc_io) {
22833 + dev_err(dev,
22834 + "fsl_mc_portal_allocate returned null handle but no error\n");
22835 + err = -EFAULT;
22836 + goto err_exit;
22837 + }
22838 +
22839 + err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
22840 + &mc_dev->mc_handle);
22841 + if (err) {
22842 + dev_err(dev, "dprtc_open err %d\n", err);
22843 + goto err_free_mcp;
22844 + }
22845 + if (!mc_dev->mc_handle) {
22846 + dev_err(dev, "dprtc_open returned null handle but no error\n");
22847 + err = -EFAULT;
22848 + goto err_free_mcp;
22849 + }
22850 +
22851 + rtc_mc_dev = mc_dev;
22852 +
22853 + err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
22854 + mc_dev->mc_handle, &tmr_add);
22855 + if (err) {
22856 + dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
22857 + goto err_close;
22858 + }
22859 +	freq_compensation = tmr_add;
22860 +
22861 + clock = ptp_clock_register(&ptp_dpaa2_caps, dev);
22862 + if (IS_ERR(clock)) {
22863 + err = PTR_ERR(clock);
22864 + goto err_close;
22865 + }
22866 + dpaa2_phc_index = ptp_clock_index(clock);
22867 +
22868 + return 0;
22869 +err_close:
22870 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
22871 +err_free_mcp:
22872 + fsl_mc_portal_free(mc_dev->mc_io);
22873 +err_exit:
22874 + return err;
22875 +}
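Once ptp_clock_register() in rtc_probe() succeeds, the DPRTC shows up as a POSIX dynamic clock under /dev/ptpN. A minimal userspace reader, purely for illustration (the device index and node name depend on the system):

/* build with: gcc -o ptp_read ptp_read.c */
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* standard fd-to-clockid encoding for dynamic POSIX clocks (CLOCKFD == 3) */
#define FD_TO_CLOCKID(fd) ((clockid_t)((((unsigned int)~(fd)) << 3) | 3))

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}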
22876 +
22877 +static int rtc_remove(struct fsl_mc_device *mc_dev)
22878 +{
22879 + ptp_clock_unregister(clock);
22880 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
22881 + fsl_mc_portal_free(mc_dev->mc_io);
22882 +
22883 + return 0;
22884 +}
22885 +
22886 +static const struct fsl_mc_device_id rtc_match_id_table[] = {
22887 + {
22888 + .vendor = FSL_MC_VENDOR_FREESCALE,
22889 + .obj_type = "dprtc",
22890 + },
22891 + {}
22892 +};
22893 +
22894 +static struct fsl_mc_driver rtc_drv = {
22895 + .driver = {
22896 + .name = KBUILD_MODNAME,
22897 + .owner = THIS_MODULE,
22898 + },
22899 + .probe = rtc_probe,
22900 + .remove = rtc_remove,
22901 + .match_id_table = rtc_match_id_table,
22902 +};
22903 +
22904 +module_fsl_mc_driver(rtc_drv);
22905 +
22906 +MODULE_LICENSE("GPL");
22907 +MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");