layerscape: update patches-4.14 to LSDK 19.03
[openwrt/staging/mkresin.git] / target / linux / layerscape / patches-4.14 / 702-dpaa2-ethernet-support-layerscape.patch
1 From 90b3f1705785f0e30de6f41abc8764aae1391245 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:28 +0800
4 Subject: [PATCH] dpaa2-ethernet: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch of dpaa2-ethernet for layerscape
10
11 Signed-off-by: Biwen Li <biwen.li@nxp.com>
12 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
13 Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
14 Signed-off-by: David S. Miller <davem@davemloft.net>
15 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
16 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
17 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
18 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
19 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
20 Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
21 Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
22 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
23 ---
24 drivers/staging/fsl-dpaa2/Kconfig | 7 +
25 drivers/staging/fsl-dpaa2/ethernet/Makefile | 3 +
26 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c | 1187 ++++++++
27 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h | 183 ++
28 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 356 +++
29 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
30 .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 29 +-
31 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2509 +++++++++++++----
32 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 394 ++-
33 .../fsl-dpaa2/ethernet/dpaa2-ethtool.c | 716 ++++-
34 drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 380 ++-
35 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 255 +-
36 drivers/staging/fsl-dpaa2/ethernet/dpni.c | 704 ++++-
37 drivers/staging/fsl-dpaa2/ethernet/dpni.h | 401 ++-
38 drivers/staging/fsl-dpaa2/ethernet/net.h | 30 +-
39 15 files changed, 6315 insertions(+), 899 deletions(-)
40 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
41 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
42 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
43 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
44
45 --- a/drivers/staging/fsl-dpaa2/Kconfig
46 +++ b/drivers/staging/fsl-dpaa2/Kconfig
47 @@ -17,6 +17,13 @@ config FSL_DPAA2_ETH
48 Ethernet driver for Freescale DPAA2 SoCs, using the
49 Freescale MC bus driver
50
51 +config FSL_DPAA2_ETH_CEETM
52 +	bool "DPAA2 Ethernet CEETM QoS"
53 +	depends on NET_SCHED
54 +	default n
55 + ---help---
56 + Enable QoS offloading support through the CEETM hardware block.
57 +
58 if FSL_DPAA2_ETH
59 config FSL_DPAA2_ETH_USE_ERR_QUEUE
60 bool "Enable Rx error queue"
61 --- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
62 +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
63 @@ -1,3 +1,4 @@
64 +# SPDX-License-Identifier: GPL-2.0
65 #
66 # Makefile for the Freescale DPAA2 Ethernet controller
67 #
68 @@ -5,6 +6,8 @@
69 obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
70
71 fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
72 +fsl-dpaa2-eth-$(CONFIG_FSL_DPAA2_ETH_DEBUGFS) += dpaa2-eth-debugfs.o
73 +fsl-dpaa2-eth-$(CONFIG_FSL_DPAA2_ETH_CEETM) += dpaa2-eth-ceetm.o
74
75 # Needed by the tracing framework
76 CFLAGS_dpaa2-eth.o := -I$(src)
77 --- /dev/null
78 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
79 @@ -0,0 +1,1187 @@
80 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
81 +/*
82 + * Copyright 2017-2019 NXP
83 + *
84 + */
85 +
86 +#include <linux/init.h>
87 +#include <linux/module.h>
88 +
89 +#include "dpaa2-eth-ceetm.h"
90 +#include "dpaa2-eth.h"
91 +
92 +#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
93 +/* Convert a rate passed from userspace in Bps to the Mbit value expected by hardware */
94 +#define dpaa2_eth_bps_to_mbit(rate) ((rate) >> 17)
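+/* x >> 17 divides by 131072, approximating x * 8 / 10^6 (i.e. x / 125000),
+ * so the result is ~4.6% low; e.g. 125000000 Bps (1 Gbit/s) yields 953
+ * rather than 1000 Mbit.
+ */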
95 +
96 +static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
97 + [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
98 + [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
99 +};
100 +
101 +struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
102 +
103 +static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
104 + struct dpni_tx_shaping_cfg *scfg,
105 + struct dpni_tx_shaping_cfg *ecfg,
106 + int coupled, int ch_id)
107 +{
108 + int err = 0;
109 +
110 + netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
111 + ch_id, scfg->rate_limit);
112 + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
113 + ecfg, coupled);
114 + if (err)
115 + netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
116 +
117 + return err;
118 +}
119 +
120 +static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
121 + int ch_id)
122 +{
123 + struct dpni_tx_shaping_cfg cfg = { 0 };
124 +
125 + return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
126 +}
127 +
128 +static inline int
129 +dpaa2_eth_update_shaping_cfg(struct net_device *dev,
130 + struct dpaa2_ceetm_shaping_cfg cfg,
131 + struct dpni_tx_shaping_cfg *scfg,
132 + struct dpni_tx_shaping_cfg *ecfg)
133 +{
134 + scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
135 + ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
136 +
137 + if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
138 +		netdev_err(dev, "Committed burst size must not exceed %d\n",
139 + DPAA2_ETH_MAX_BURST_SIZE);
140 + return -EINVAL;
141 + }
142 +
143 + scfg->max_burst_size = cfg.cbs;
144 +
145 + if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
146 +		netdev_err(dev, "Excess burst size must not exceed %d\n",
147 + DPAA2_ETH_MAX_BURST_SIZE);
148 + return -EINVAL;
149 + }
150 +
151 + ecfg->max_burst_size = cfg.ebs;
152 +
153 + if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
154 +		netdev_err(dev, "Coupling can only be set when both CIR and EIR are finite\n");
155 + return -EINVAL;
156 + }
157 +
158 + return 0;
159 +}
160 +
161 +enum update_tx_prio {
162 + DPAA2_ETH_ADD_CQ,
163 + DPAA2_ETH_DEL_CQ,
164 +};
165 +
166 +/* Normalize weights based on max passed value */
167 +static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
168 +{
169 + struct dpni_tx_schedule_cfg *sched_cfg;
170 + struct dpaa2_ceetm_class *cl;
171 + u32 qpri;
172 + u16 weight_max = 0, increment;
173 + int i;
174 +
175 + /* Check the boundaries of the provided values */
176 + for (i = 0; i < priv->clhash.hashsize; i++)
177 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
178 + weight_max = (weight_max == 0 ? cl->prio.weight :
179 + (weight_max < cl->prio.weight ?
180 + cl->prio.weight : weight_max));
181 +
182 + /* If there are no elements, there's nothing to do */
183 + if (weight_max == 0)
184 + return 0;
185 +
186 + increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
187 + weight_max;
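+	/* E.g. for weights {1, 2, 4}: increment = (24800 - 100) / 4 = 6175,
+	 * so delta_bandwidth below becomes {6275, 12450, 24800}.
+	 */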
188 +
189 + for (i = 0; i < priv->clhash.hashsize; i++) {
190 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
191 + if (cl->prio.mode == STRICT_PRIORITY)
192 + continue;
193 +
194 + qpri = cl->prio.qpri;
195 + sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
196 +
197 + sched_cfg->delta_bandwidth =
198 + DPAA2_CEETM_MIN_WEIGHT +
199 + (cl->prio.weight * increment);
200 +
201 + pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
202 + __func__, qpri, sched_cfg->delta_bandwidth);
203 + }
204 + }
205 +
206 + return 0;
207 +}
208 +
209 +static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
210 + struct dpaa2_ceetm_class *cl,
211 + enum update_tx_prio type)
212 +{
213 + struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
214 + struct dpni_tx_schedule_cfg *sched_cfg;
215 + struct dpni_taildrop td = {0};
216 + u8 ch_id = 0, tc_id = 0;
217 + u32 qpri = 0;
218 + int err = 0;
219 +
220 + qpri = cl->prio.qpri;
221 + tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
222 +
223 + switch (type) {
224 + case DPAA2_ETH_ADD_CQ:
225 + /* Enable taildrop */
226 + td.enable = 1;
227 + td.units = DPNI_CONGESTION_UNIT_FRAMES;
228 + td.threshold = DPAA2_CEETM_TD_THRESHOLD;
229 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
230 + DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
231 + 0, &td);
232 + if (err) {
233 + netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
234 + err);
235 + return err;
236 + }
237 + break;
238 + case DPAA2_ETH_DEL_CQ:
239 + /* Disable taildrop */
240 + td.enable = 0;
241 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
242 + DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
243 + 0, &td);
244 + if (err) {
245 + netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
246 + err);
247 + return err;
248 + }
249 + break;
250 + }
251 +
252 + /* We can zero out the structure in the tx_prio_conf array */
253 + if (type == DPAA2_ETH_DEL_CQ) {
254 + sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
255 + memset(sched_cfg, 0, sizeof(*sched_cfg));
256 + }
257 +
258 + /* Normalize priorities */
259 + err = dpaa2_eth_normalize_tx_prio(sch);
260 +
261 + /* Debug print goes here */
262 + print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
263 + &sch->prio.tx_prio_cfg,
264 + sizeof(sch->prio.tx_prio_cfg), 0);
265 +
266 + /* Call dpni_set_tx_priorities for the entire prio qdisc */
267 + err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
268 + &sch->prio.tx_prio_cfg);
269 + if (err)
270 + netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
271 + err);
272 +
273 + return err;
274 +}
275 +
276 +static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
277 +{
278 + priv->ceetm_en = true;
279 +}
280 +
281 +static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
282 +{
283 + priv->ceetm_en = false;
284 +}
285 +
286 +/* Find class in qdisc hash table using given handle */
287 +static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
288 + struct Qdisc *sch)
289 +{
290 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
291 + struct Qdisc_class_common *clc;
292 +
293 + pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
294 + __func__, handle, sch->handle);
295 +
296 + clc = qdisc_class_find(&priv->clhash, handle);
297 + return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
298 +}
299 +
300 +/* Insert a class in the qdisc's class hash */
301 +static void dpaa2_ceetm_link_class(struct Qdisc *sch,
302 + struct Qdisc_class_hash *clhash,
303 + struct Qdisc_class_common *common)
304 +{
305 + sch_tree_lock(sch);
306 + qdisc_class_hash_insert(clhash, common);
307 + sch_tree_unlock(sch);
308 + qdisc_class_hash_grow(sch, clhash);
309 +}
310 +
311 +/* Destroy a ceetm class */
312 +static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
313 + struct dpaa2_ceetm_class *cl)
314 +{
315 + struct net_device *dev = qdisc_dev(sch);
316 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
317 +
318 + if (!cl)
319 + return;
320 +
321 + pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
322 + __func__, cl->common.classid, sch->handle);
323 +
324 + /* Recurse into child first */
325 + if (cl->child) {
326 + qdisc_destroy(cl->child);
327 + cl->child = NULL;
328 + }
329 +
330 + switch (cl->type) {
331 + case CEETM_ROOT:
332 + if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
333 + netdev_err(dev, "Error resetting channel shaping\n");
334 +
335 + break;
336 +
337 + case CEETM_PRIO:
338 + if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
339 + netdev_err(dev, "Error resetting tx_priorities\n");
340 +
341 + if (cl->prio.cstats)
342 + free_percpu(cl->prio.cstats);
343 +
344 + break;
345 + }
346 +
347 + tcf_block_put(cl->block);
348 + kfree(cl);
349 +}
350 +
351 +/* Destroy a ceetm qdisc */
352 +static void dpaa2_ceetm_destroy(struct Qdisc *sch)
353 +{
354 + unsigned int i;
355 + struct hlist_node *next;
356 + struct dpaa2_ceetm_class *cl;
357 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
358 + struct net_device *dev = qdisc_dev(sch);
359 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
360 +
361 + pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
362 + __func__, sch->handle);
363 +
364 + /* All filters need to be removed before destroying the classes */
365 + tcf_block_put(priv->block);
366 +
367 +	for (i = 0; i < priv->clhash.hashsize; i++) {
368 +		hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
369 +			tcf_block_put(cl->block);
+			/* NULL the pointer so dpaa2_ceetm_cls_destroy() below
+			 * doesn't put the block a second time
+			 */
+			cl->block = NULL;
+		}
370 +	}
371 +
372 + for (i = 0; i < priv->clhash.hashsize; i++) {
373 + hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
374 + common.hnode)
375 + dpaa2_ceetm_cls_destroy(sch, cl);
376 + }
377 +
378 + qdisc_class_hash_destroy(&priv->clhash);
379 +
380 + switch (priv->type) {
381 + case CEETM_ROOT:
382 + dpaa2_eth_ceetm_disable(priv_eth);
383 +
384 + if (priv->root.qstats)
385 + free_percpu(priv->root.qstats);
386 +
387 + if (!priv->root.qdiscs)
388 + break;
389 +
390 + /* Destroy the pfifo qdiscs in case they haven't been attached
391 + * to the netdev queues yet.
392 + */
393 + for (i = 0; i < dev->num_tx_queues; i++)
394 + if (priv->root.qdiscs[i])
395 + qdisc_destroy(priv->root.qdiscs[i]);
396 +
397 + kfree(priv->root.qdiscs);
398 + break;
399 +
400 + case CEETM_PRIO:
401 + if (priv->prio.parent)
402 + priv->prio.parent->child = NULL;
403 + break;
404 + }
405 +}
406 +
407 +static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
408 +{
409 + struct Qdisc *qdisc;
410 + unsigned int ntx, i;
411 + struct nlattr *nest;
412 + struct dpaa2_ceetm_tc_qopt qopt;
413 + struct dpaa2_ceetm_qdisc_stats *qstats;
414 + struct net_device *dev = qdisc_dev(sch);
415 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
416 +
417 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
418 +
419 + sch_tree_lock(sch);
420 + memset(&qopt, 0, sizeof(qopt));
421 + qopt.type = priv->type;
422 + qopt.shaped = priv->shaped;
423 +
424 + switch (priv->type) {
425 + case CEETM_ROOT:
426 + /* Gather statistics from the underlying pfifo qdiscs */
427 + sch->q.qlen = 0;
428 + memset(&sch->bstats, 0, sizeof(sch->bstats));
429 + memset(&sch->qstats, 0, sizeof(sch->qstats));
430 +
431 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
432 + qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
433 + sch->q.qlen += qdisc->q.qlen;
434 + sch->bstats.bytes += qdisc->bstats.bytes;
435 + sch->bstats.packets += qdisc->bstats.packets;
436 + sch->qstats.qlen += qdisc->qstats.qlen;
437 + sch->qstats.backlog += qdisc->qstats.backlog;
438 + sch->qstats.drops += qdisc->qstats.drops;
439 + sch->qstats.requeues += qdisc->qstats.requeues;
440 + sch->qstats.overlimits += qdisc->qstats.overlimits;
441 + }
442 +
443 + for_each_online_cpu(i) {
444 + qstats = per_cpu_ptr(priv->root.qstats, i);
445 + sch->qstats.drops += qstats->drops;
446 + }
447 +
448 + break;
449 +
450 + case CEETM_PRIO:
451 + qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
452 + qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
453 + qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
454 + break;
455 +
456 + default:
457 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
458 + sch_tree_unlock(sch);
459 + return -EINVAL;
460 + }
461 +
462 + nest = nla_nest_start(skb, TCA_OPTIONS);
463 + if (!nest)
464 + goto nla_put_failure;
465 + if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
466 + goto nla_put_failure;
467 + nla_nest_end(skb, nest);
468 +
469 + sch_tree_unlock(sch);
470 + return skb->len;
471 +
472 +nla_put_failure:
473 + sch_tree_unlock(sch);
474 + nla_nest_cancel(skb, nest);
475 + return -EMSGSIZE;
476 +}
477 +
478 +static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
479 + struct dpaa2_ceetm_qdisc *priv,
480 + struct dpaa2_ceetm_tc_qopt *qopt)
481 +{
482 + /* TODO: Once LX2 support is added */
483 + /* priv->shaped = parent_cl->shaped; */
484 + priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
485 + priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
486 + priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
487 +
488 + return 0;
489 +}
490 +
491 +/* Edit a ceetm qdisc */
492 +static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
493 +{
494 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
495 + struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
496 + struct dpaa2_ceetm_tc_qopt *qopt;
497 + int err;
498 +
499 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
500 +
501 + err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
502 + dpaa2_ceetm_policy, NULL);
503 + if (err < 0) {
504 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
505 + "nla_parse_nested");
506 + return err;
507 + }
508 +
509 + if (!tb[DPAA2_CEETM_TCA_QOPS]) {
510 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
511 + "tb");
512 + return -EINVAL;
513 + }
514 +
515 + if (TC_H_MIN(sch->handle)) {
516 + pr_err("CEETM: a qdisc should not have a minor\n");
517 + return -EINVAL;
518 + }
519 +
520 + qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
521 +
522 + if (priv->type != qopt->type) {
523 + pr_err("CEETM: qdisc %X is not of the provided type\n",
524 + sch->handle);
525 + return -EINVAL;
526 + }
527 +
528 + switch (priv->type) {
529 + case CEETM_PRIO:
530 + err = dpaa2_ceetm_change_prio(sch, priv, qopt);
531 + break;
532 + default:
533 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
534 + err = -EINVAL;
535 + }
536 +
537 + return err;
538 +}
539 +
540 +/* Configure a root ceetm qdisc */
541 +static int dpaa2_ceetm_init_root(struct Qdisc *sch,
542 + struct dpaa2_ceetm_qdisc *priv,
543 + struct dpaa2_ceetm_tc_qopt *qopt)
544 +{
545 + struct net_device *dev = qdisc_dev(sch);
546 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
547 + struct netdev_queue *dev_queue;
548 + unsigned int i, parent_id;
549 + struct Qdisc *qdisc;
550 +
551 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
552 +
553 + /* Validate inputs */
554 + if (sch->parent != TC_H_ROOT) {
555 + pr_err("CEETM: a root ceetm qdisc must be root\n");
556 + return -EINVAL;
557 + }
558 +
559 + /* Pre-allocate underlying pfifo qdiscs.
560 + *
561 + * We want to offload shaping and scheduling decisions to the hardware.
562 + * The pfifo qdiscs will be attached to the netdev queues and will
563 + * guide the traffic from the IP stack down to the driver with minimum
564 + * interference.
565 + *
566 + * The CEETM qdiscs and classes will be crossed when the traffic
567 + * reaches the driver.
568 + */
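+	/* E.g. with root handle 1:, the loop below creates the pfifo for tx
+	 * queue i with parent id 1:(0x21 + i), per PFIFO_MIN_OFFSET.
+	 */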
569 + priv->root.qdiscs = kcalloc(dev->num_tx_queues,
570 + sizeof(priv->root.qdiscs[0]),
571 + GFP_KERNEL);
572 + if (!priv->root.qdiscs)
573 + return -ENOMEM;
574 +
575 + for (i = 0; i < dev->num_tx_queues; i++) {
576 + dev_queue = netdev_get_tx_queue(dev, i);
577 + parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
578 + TC_H_MIN(i + PFIFO_MIN_OFFSET));
579 +
580 + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
581 + parent_id);
582 + if (!qdisc)
583 + return -ENOMEM;
584 +
585 + priv->root.qdiscs[i] = qdisc;
586 + qdisc->flags |= TCQ_F_ONETXQUEUE;
587 + }
588 +
589 + sch->flags |= TCQ_F_MQROOT;
590 +
591 + priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
592 + if (!priv->root.qstats) {
593 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
594 + __func__);
595 + return -ENOMEM;
596 + }
597 +
598 + dpaa2_eth_ceetm_enable(priv_eth);
599 + return 0;
600 +}
601 +
602 +/* Configure a prio ceetm qdisc */
603 +static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
604 + struct dpaa2_ceetm_qdisc *priv,
605 + struct dpaa2_ceetm_tc_qopt *qopt)
606 +{
607 + struct net_device *dev = qdisc_dev(sch);
608 + struct dpaa2_ceetm_class *parent_cl;
609 + struct Qdisc *parent_qdisc;
610 +
611 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
612 +
613 + if (sch->parent == TC_H_ROOT) {
614 + pr_err("CEETM: a prio ceetm qdisc can not be root\n");
615 + return -EINVAL;
616 + }
617 +
618 + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
619 + if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
620 + pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
621 + return -EINVAL;
622 + }
623 +
624 + /* Obtain the parent root ceetm_class */
625 + parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
626 +
627 + if (!parent_cl || parent_cl->type != CEETM_ROOT) {
628 + pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
629 + return -EINVAL;
630 + }
631 +
632 + priv->prio.parent = parent_cl;
633 + parent_cl->child = sch;
634 +
635 + return dpaa2_ceetm_change_prio(sch, priv, qopt);
636 +}
637 +
638 +/* Configure a generic ceetm qdisc */
639 +static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
640 +{
641 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
642 + struct net_device *dev = qdisc_dev(sch);
643 + struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
644 + struct dpaa2_ceetm_tc_qopt *qopt;
645 + int err;
646 +
647 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
648 +
649 + if (!netif_is_multiqueue(dev))
650 + return -EOPNOTSUPP;
651 +
652 + err = tcf_block_get(&priv->block, &priv->filter_list);
653 + if (err) {
654 + pr_err("CEETM: unable to get tcf_block\n");
655 + return err;
656 + }
657 +
658 + if (!opt) {
659 + pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
660 + __func__);
661 + return -EINVAL;
662 + }
663 +
664 + err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
665 + dpaa2_ceetm_policy, NULL);
666 + if (err < 0) {
667 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
668 + "nla_parse_nested");
669 + return err;
670 + }
671 +
672 + if (!tb[DPAA2_CEETM_TCA_QOPS]) {
673 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
674 + "tb");
675 + return -EINVAL;
676 + }
677 +
678 + if (TC_H_MIN(sch->handle)) {
679 + pr_err("CEETM: a qdisc should not have a minor\n");
680 + return -EINVAL;
681 + }
682 +
683 + qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
684 +
685 + /* Initialize the class hash list. Each qdisc has its own class hash */
686 + err = qdisc_class_hash_init(&priv->clhash);
687 + if (err < 0) {
688 + pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
689 + __func__);
690 + return err;
691 + }
692 +
693 + priv->type = qopt->type;
694 + priv->shaped = qopt->shaped;
695 +
696 + switch (priv->type) {
697 + case CEETM_ROOT:
698 + err = dpaa2_ceetm_init_root(sch, priv, qopt);
699 + break;
700 + case CEETM_PRIO:
701 + err = dpaa2_ceetm_init_prio(sch, priv, qopt);
702 + break;
703 + default:
704 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
705 + /* Note: dpaa2_ceetm_destroy() will be called by our caller */
706 + err = -EINVAL;
707 + }
708 +
709 + return err;
710 +}
711 +
712 +/* Attach the underlying pfifo qdiscs */
713 +static void dpaa2_ceetm_attach(struct Qdisc *sch)
714 +{
715 + struct net_device *dev = qdisc_dev(sch);
716 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
717 + struct Qdisc *qdisc, *old_qdisc;
718 + unsigned int i;
719 +
720 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
721 +
722 + for (i = 0; i < dev->num_tx_queues; i++) {
723 + qdisc = priv->root.qdiscs[i];
724 + old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
725 + if (old_qdisc)
726 + qdisc_destroy(old_qdisc);
727 + }
728 +
729 +	/* Remove the references to the pfifo qdiscs since the kernel will
730 +	 * destroy them when needed. No cleanup on our part is required from
731 +	 * this point on.
732 +	 */
733 + kfree(priv->root.qdiscs);
734 + priv->root.qdiscs = NULL;
735 +}
736 +
737 +static unsigned long dpaa2_ceetm_cls_find(struct Qdisc *sch, u32 classid)
738 +{
739 + struct dpaa2_ceetm_class *cl;
740 +
741 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
742 + __func__, classid, sch->handle);
743 + cl = dpaa2_ceetm_find(classid, sch);
744 +
745 + return (unsigned long)cl;
746 +}
747 +
748 +static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
749 + struct dpaa2_ceetm_tc_copt *copt,
750 + struct net_device *dev)
751 +{
752 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
753 + struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
754 + int err = 0;
755 +
756 + pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
757 + cl->common.classid);
758 +
759 + if (!cl->shaped)
760 + return 0;
761 +
762 + if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
763 + &scfg, &ecfg))
764 + return -EINVAL;
765 +
766 + err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
767 + copt->shaping_cfg.coupled,
768 + cl->root.ch_id);
769 + if (err)
770 + return err;
771 +
772 + memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
773 + sizeof(struct dpaa2_ceetm_shaping_cfg));
774 +
775 + return err;
776 +}
777 +
778 +static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
779 + struct dpaa2_ceetm_tc_copt *copt,
780 + struct net_device *dev)
781 +{
782 + struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
783 + struct dpni_tx_schedule_cfg *sched_cfg;
784 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
785 + int err;
786 +
787 + pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
788 + __func__, cl->common.classid, copt->mode, copt->weight);
789 +
790 + if (!cl->prio.cstats) {
791 + cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
792 + if (!cl->prio.cstats) {
793 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
794 + __func__);
795 + return -ENOMEM;
796 + }
797 + }
798 +
799 + cl->prio.mode = copt->mode;
800 + cl->prio.weight = copt->weight;
801 +
802 + sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
803 +
804 + switch (copt->mode) {
805 + case STRICT_PRIORITY:
806 + sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
807 + break;
808 + case WEIGHTED_A:
809 + sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
810 + break;
811 + case WEIGHTED_B:
812 + sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
813 + break;
814 + }
815 +
816 + err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
817 +
818 + return err;
819 +}
820 +
821 +/* Add a new ceetm class */
822 +static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
823 + struct dpaa2_ceetm_tc_copt *copt,
824 + unsigned long *arg)
825 +{
826 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
827 + struct net_device *dev = qdisc_dev(sch);
828 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
829 + struct dpaa2_ceetm_class *cl;
830 + int err;
831 +
832 + if (copt->type == CEETM_ROOT &&
833 + priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
834 + pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
835 + dpaa2_eth_ch_count(priv_eth),
836 + dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
837 + return -EINVAL;
838 + }
839 +
840 + if (copt->type == CEETM_PRIO &&
841 + priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
842 + pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
843 + dpaa2_eth_tc_count(priv_eth),
844 + dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
845 + return -EINVAL;
846 + }
847 +
848 + cl = kzalloc(sizeof(*cl), GFP_KERNEL);
849 + if (!cl)
850 + return -ENOMEM;
851 +
852 + err = tcf_block_get(&cl->block, &cl->filter_list);
853 + if (err) {
854 + pr_err("%s: Unable to set new root class\n", __func__);
855 + goto out_free;
856 + }
857 +
858 + cl->common.classid = classid;
859 + cl->parent = sch;
860 + cl->child = NULL;
861 +
862 + /* Add class handle in Qdisc */
863 + dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
864 +
865 + cl->shaped = copt->shaped;
866 + cl->type = copt->type;
867 +
868 +	/* Claim a CEETM channel / tc - DPAA2 will assume a direct mapping from
869 +	 * classid to qdid/qpri, starting from qdid/qpri 0
870 +	 */
871 + switch (copt->type) {
872 + case CEETM_ROOT:
873 + cl->root.ch_id = classid - sch->handle - 1;
874 + err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
875 + break;
876 + case CEETM_PRIO:
877 + cl->prio.qpri = classid - sch->handle - 1;
878 + err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
879 + break;
880 + }
881 +
882 + if (err) {
883 + pr_err("%s: Unable to set new %s class\n", __func__,
884 + (copt->type == CEETM_ROOT ? "root" : "prio"));
885 + goto out_free;
886 + }
887 +
888 + switch (copt->type) {
889 + case CEETM_ROOT:
890 + pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
891 + __func__, classid, cl->root.ch_id);
892 + break;
893 + case CEETM_PRIO:
894 + pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
895 + __func__, classid, cl->prio.qpri);
896 + break;
897 + }
898 +
899 + *arg = (unsigned long)cl;
900 + return 0;
901 +
902 +out_free:
903 + kfree(cl);
904 + return err;
905 +}
906 +
907 +/* Add or configure a ceetm class */
908 +static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
909 + struct nlattr **tca, unsigned long *arg)
910 +{
911 + struct dpaa2_ceetm_qdisc *priv;
912 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
913 + struct nlattr *opt = tca[TCA_OPTIONS];
914 + struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
915 + struct dpaa2_ceetm_tc_copt *copt;
916 + struct net_device *dev = qdisc_dev(sch);
917 + int err;
918 +
919 + pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
920 + __func__, classid, sch->handle);
921 +
922 + if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
923 + pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
924 + return -EINVAL;
925 + }
926 +
927 + priv = qdisc_priv(sch);
928 +
929 + if (!opt) {
930 + pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
931 + return -EINVAL;
932 + }
933 +
934 + err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
935 + dpaa2_ceetm_policy, NULL);
936 + if (err < 0) {
937 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
938 + "nla_parse_nested");
939 + return -EINVAL;
940 + }
941 +
942 + if (!tb[DPAA2_CEETM_TCA_COPT]) {
943 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
944 + "tb");
945 + return -EINVAL;
946 + }
947 +
948 + copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
949 +
950 + /* Configure an existing ceetm class */
951 + if (cl) {
952 + if (copt->type != cl->type) {
953 + pr_err("CEETM: class %X is not of the provided type\n",
954 + cl->common.classid);
955 + return -EINVAL;
956 + }
957 +
958 + switch (copt->type) {
959 + case CEETM_ROOT:
960 + return dpaa2_ceetm_cls_change_root(cl, copt, dev);
961 + case CEETM_PRIO:
962 + return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
963 +
964 + default:
965 + pr_err(KBUILD_BASENAME " : %s : invalid class\n",
966 + __func__);
967 + return -EINVAL;
968 + }
969 + }
970 +
971 + return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
972 +}
973 +
974 +static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
975 +{
976 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
977 + struct dpaa2_ceetm_class *cl;
978 + unsigned int i;
979 +
980 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
981 +
982 + if (arg->stop)
983 + return;
984 +
985 + for (i = 0; i < priv->clhash.hashsize; i++) {
986 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
987 + if (arg->count < arg->skip) {
988 + arg->count++;
989 + continue;
990 + }
991 + if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
992 + arg->stop = 1;
993 + return;
994 + }
995 + arg->count++;
996 + }
997 + }
998 +}
999 +
1000 +static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
1001 + struct sk_buff *skb, struct tcmsg *tcm)
1002 +{
1003 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1004 + struct nlattr *nest;
1005 + struct dpaa2_ceetm_tc_copt copt;
1006 +
1007 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1008 + __func__, cl->common.classid, sch->handle);
1009 +
1010 + sch_tree_lock(sch);
1011 +
1012 + tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
1013 + tcm->tcm_handle = cl->common.classid;
1014 +
1015 + memset(&copt, 0, sizeof(copt));
1016 +
1017 + copt.shaped = cl->shaped;
1018 + copt.type = cl->type;
1019 +
1020 + switch (cl->type) {
1021 + case CEETM_ROOT:
1022 + if (cl->child)
1023 + tcm->tcm_info = cl->child->handle;
1024 +
1025 + memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
1026 + sizeof(struct dpaa2_ceetm_shaping_cfg));
1027 +
1028 + break;
1029 +
1030 + case CEETM_PRIO:
1031 + if (cl->child)
1032 + tcm->tcm_info = cl->child->handle;
1033 +
1034 + copt.mode = cl->prio.mode;
1035 + copt.weight = cl->prio.weight;
1036 +
1037 + break;
1038 + }
1039 +
1040 + nest = nla_nest_start(skb, TCA_OPTIONS);
1041 + if (!nest)
1042 + goto nla_put_failure;
1043 + if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
1044 + goto nla_put_failure;
1045 + nla_nest_end(skb, nest);
1046 + sch_tree_unlock(sch);
1047 + return skb->len;
1048 +
1049 +nla_put_failure:
1050 + sch_tree_unlock(sch);
1051 + nla_nest_cancel(skb, nest);
1052 + return -EMSGSIZE;
1053 +}
1054 +
1055 +static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
1056 +{
1057 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1058 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1059 +
1060 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1061 + __func__, cl->common.classid, sch->handle);
1062 +
1063 + sch_tree_lock(sch);
1064 + qdisc_class_hash_remove(&priv->clhash, &cl->common);
1065 + sch_tree_unlock(sch);
1066 + return 0;
1067 +}
1068 +
1069 +/* Get the class' child qdisc, if any */
1070 +static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
1071 +{
1072 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1073 +
1074 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1075 + __func__, cl->common.classid, sch->handle);
1076 +
1077 + switch (cl->type) {
1078 + case CEETM_ROOT:
1079 + case CEETM_PRIO:
1080 + return cl->child;
1081 + }
1082 +
1083 + return NULL;
1084 +}
1085 +
1086 +static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
1087 + struct Qdisc *new, struct Qdisc **old)
1088 +{
1089 + if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
1090 + pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
1091 + return -EOPNOTSUPP;
1092 + }
1093 +
1094 + return 0;
1095 +}
1096 +
1097 +static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
1098 + struct gnet_dump *d)
1099 +{
1100 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1102 + struct dpaa2_ceetm_tc_xstats xstats;
1103 + union dpni_statistics dpni_stats;
1104 + struct net_device *dev = qdisc_dev(sch);
1105 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
1106 + u8 ch_id = 0;
1107 + int err;
1108 +
1109 + memset(&xstats, 0, sizeof(xstats));
1111 +
1112 + if (cl->type == CEETM_ROOT)
1113 + return 0;
1114 +
1115 + err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
1116 + DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
1117 + &dpni_stats);
1118 + if (err)
1119 +		netdev_warn(dev, "dpni_get_statistics(%d) failed - %d\n", 3, err);
1120 +
1121 + xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
1122 + xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
1123 + xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
1124 + xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
1125 +
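+	/* The xstats are reported as app-specific class statistics; a
+	 * CEETM-aware tc binary can display them, e.g. via
+	 * "tc -s class show dev <netif>".
+	 */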
1126 + return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
1127 +}
1128 +
1129 +static struct tcf_block *dpaa2_ceetm_tcf_block(struct Qdisc *sch,
1130 + unsigned long arg)
1131 +{
1132 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1133 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1134 +
1135 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1136 + cl ? cl->common.classid : 0, sch->handle);
1137 + return cl ? cl->block : priv->block;
1138 +}
1139 +
1140 +static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
1141 + unsigned long parent,
1142 + u32 classid)
1143 +{
1144 + struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
1145 +
1146 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1147 + cl ? cl->common.classid : 0, sch->handle);
1148 + return (unsigned long)cl;
1149 +}
1150 +
1151 +static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
1152 +{
1153 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1154 +
1155 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1156 + cl ? cl->common.classid : 0, sch->handle);
1157 +}
1158 +
1159 +const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
1160 + .graft = dpaa2_ceetm_cls_graft,
1161 + .leaf = dpaa2_ceetm_cls_leaf,
1162 + .find = dpaa2_ceetm_cls_find,
1163 + .change = dpaa2_ceetm_cls_change,
1164 + .delete = dpaa2_ceetm_cls_delete,
1165 + .walk = dpaa2_ceetm_cls_walk,
1166 + .tcf_block = dpaa2_ceetm_tcf_block,
1167 + .bind_tcf = dpaa2_ceetm_tcf_bind,
1168 + .unbind_tcf = dpaa2_ceetm_tcf_unbind,
1169 + .dump = dpaa2_ceetm_cls_dump,
1170 + .dump_stats = dpaa2_ceetm_cls_dump_stats,
1171 +};
1172 +
1173 +struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
1174 + .id = "ceetm",
1175 + .priv_size = sizeof(struct dpaa2_ceetm_qdisc),
1176 + .cl_ops = &dpaa2_ceetm_cls_ops,
1177 + .init = dpaa2_ceetm_init,
1178 + .destroy = dpaa2_ceetm_destroy,
1179 + .change = dpaa2_ceetm_change,
1180 + .dump = dpaa2_ceetm_dump,
1181 + .attach = dpaa2_ceetm_attach,
1182 + .owner = THIS_MODULE,
1183 +};
1184 +
1185 +/* Run the filters and classifiers attached to the qdisc on the provided skb */
1186 +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1187 + int *qdid, u8 *qpri)
1188 +{
1189 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1190 + struct dpaa2_ceetm_class *cl = NULL;
1191 + struct tcf_result res;
1192 + struct tcf_proto *tcf;
1193 + int result;
1194 +
1195 + tcf = rcu_dereference_bh(priv->filter_list);
1196 + while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
1197 +#ifdef CONFIG_NET_CLS_ACT
1198 + switch (result) {
1199 + case TC_ACT_QUEUED:
1200 + case TC_ACT_STOLEN:
1201 + case TC_ACT_SHOT:
1202 + /* No valid class found due to action */
1203 + return -1;
1204 + }
1205 +#endif
1206 + cl = (void *)res.class;
1207 + if (!cl) {
1208 + /* The filter leads to the qdisc */
1209 + if (res.classid == sch->handle)
1210 + return 0;
1211 +
1212 + cl = dpaa2_ceetm_find(res.classid, sch);
1213 + /* The filter leads to an invalid class */
1214 + if (!cl)
1215 + break;
1216 + }
1217 +
1218 + /* The class might have its own filters attached */
1219 + tcf = rcu_dereference_bh(cl->filter_list);
1220 + }
1221 +
1222 + /* No valid class found */
1223 + if (!cl)
1224 + return 0;
1225 +
1226 + switch (cl->type) {
1227 + case CEETM_ROOT:
1228 + *qdid = cl->root.ch_id;
1229 +
1230 + /* The root class does not have a child prio qdisc */
1231 + if (!cl->child)
1232 + return 0;
1233 +
1234 + /* Run the prio qdisc classifiers */
1235 + return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
1236 +
1237 + case CEETM_PRIO:
1238 + *qpri = cl->prio.qpri;
1239 + break;
1240 + }
1241 +
1242 + return 0;
1243 +}
1244 +
1245 +int __init dpaa2_ceetm_register(void)
1246 +{
1247 + int err = 0;
1248 +
1249 + pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
1250 +
1251 + err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
1252 + if (unlikely(err))
1253 + pr_err(KBUILD_MODNAME
1254 + ": %s:%hu:%s(): register_qdisc() = %d\n",
1255 + KBUILD_BASENAME ".c", __LINE__, __func__, err);
1256 +
1257 + return err;
1258 +}
1259 +
1260 +void __exit dpaa2_ceetm_unregister(void)
1261 +{
1262 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
1263 + KBUILD_BASENAME ".c", __func__);
1264 +
1265 + unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
1266 +}
1267 --- /dev/null
1268 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
1269 @@ -0,0 +1,183 @@
1270 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
1271 +/*
1272 + * Copyright 2017 NXP
1273 + *
1274 + */
1275 +
1276 +#ifndef __DPAA2_ETH_CEETM_H
1277 +#define __DPAA2_ETH_CEETM_H
1278 +
1279 +#include <net/pkt_sched.h>
1280 +#include <net/pkt_cls.h>
1281 +#include <net/netlink.h>
1282 +
1283 +#include "dpaa2-eth.h"
1284 +
1285 +/* There are num_tx_queues pfifo qdiscs through which frames reach the
1286 + * driver. Their handles start from 1:21. Handles 1:1 to 1:20 are
1287 + * reserved for the maximum of 32 CEETM channels (majors and minors
1288 + * are in hex).
1289 + */
1290 +#define PFIFO_MIN_OFFSET 0x21
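+
+/* Sketch of a typical CEETM hierarchy setup from userspace, assuming the
+ * CEETM-enabled iproute2 shipped with LSDK (exact option names may differ):
+ *   tc qdisc add dev <netif> root handle 1: ceetm type root
+ *   tc class add dev <netif> parent 1: classid 1:1 ceetm type root cir <rate>
+ *   tc qdisc add dev <netif> parent 1:1 handle 2: ceetm type prio
+ *   tc class add dev <netif> parent 2: classid 2:1 ceetm type prio mode <mode>
+ */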
1291 +
1292 +#define DPAA2_CEETM_MIN_WEIGHT 100
1293 +#define DPAA2_CEETM_MAX_WEIGHT 24800
1294 +
1295 +#define DPAA2_CEETM_TD_THRESHOLD 1000
1296 +
1297 +enum wbfs_group_type {
1298 + WBFS_GRP_A,
1299 + WBFS_GRP_B,
1300 + WBFS_GRP_LARGE
1301 +};
1302 +
1303 +enum {
1304 + DPAA2_CEETM_TCA_UNSPEC,
1305 + DPAA2_CEETM_TCA_COPT,
1306 + DPAA2_CEETM_TCA_QOPS,
1307 + DPAA2_CEETM_TCA_MAX,
1308 +};
1309 +
1310 +/* CEETM configuration types */
1311 +enum dpaa2_ceetm_type {
1312 + CEETM_ROOT = 1,
1313 + CEETM_PRIO,
1314 +};
1315 +
1316 +enum {
1317 + STRICT_PRIORITY = 0,
1318 + WEIGHTED_A,
1319 + WEIGHTED_B,
1320 +};
1321 +
1322 +struct dpaa2_ceetm_shaping_cfg {
1323 + __u64 cir; /* committed information rate */
1324 + __u64 eir; /* excess information rate */
1325 + __u16 cbs; /* committed burst size */
1326 + __u16 ebs; /* excess burst size */
1327 + __u8 coupled; /* shaper coupling */
1328 +};
1329 +
1331 +
1332 +struct dpaa2_ceetm_class;
1333 +struct dpaa2_ceetm_qdisc_stats;
1334 +struct dpaa2_ceetm_class_stats;
1335 +
1336 +/* corresponds to CEETM shaping at LNI level */
1337 +struct dpaa2_root_q {
1338 + struct Qdisc **qdiscs;
1339 + struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
1340 +};
1341 +
1342 +/* corresponds to the number of priorities a channel serves */
1343 +struct dpaa2_prio_q {
1344 + struct dpaa2_ceetm_class *parent;
1345 + struct dpni_tx_priorities_cfg tx_prio_cfg;
1346 +};
1347 +
1348 +struct dpaa2_ceetm_qdisc {
1349 + struct Qdisc_class_hash clhash;
1350 + struct tcf_proto *filter_list; /* qdisc attached filters */
1351 + struct tcf_block *block;
1352 +
1353 + enum dpaa2_ceetm_type type; /* ROOT/PRIO */
1354 + bool shaped;
1355 + union {
1356 + struct dpaa2_root_q root;
1357 + struct dpaa2_prio_q prio;
1358 + };
1359 +};
1360 +
1361 +/* CEETM Qdisc configuration parameters */
1362 +struct dpaa2_ceetm_tc_qopt {
1363 + enum dpaa2_ceetm_type type;
1364 + __u16 shaped;
1365 + __u8 prio_group_A;
1366 + __u8 prio_group_B;
1367 + __u8 separate_groups;
1368 +};
1369 +
1370 +/* root class - corresponds to a channel */
1371 +struct dpaa2_root_c {
1372 + struct dpaa2_ceetm_shaping_cfg shaping_cfg;
1373 + u32 ch_id;
1374 +};
1375 +
1376 +/* prio class - corresponds to a strict priority queue (group) */
1377 +struct dpaa2_prio_c {
1378 + struct dpaa2_ceetm_class_stats __percpu *cstats;
1379 + u32 qpri;
1380 + u8 mode;
1381 + u16 weight;
1382 +};
1383 +
1384 +struct dpaa2_ceetm_class {
1385 + struct Qdisc_class_common common;
1386 + struct tcf_proto *filter_list; /* class attached filters */
1387 + struct tcf_block *block;
1388 + struct Qdisc *parent;
1389 + struct Qdisc *child;
1390 +
1391 + enum dpaa2_ceetm_type type; /* ROOT/PRIO */
1392 + bool shaped;
1393 + union {
1394 + struct dpaa2_root_c root;
1395 + struct dpaa2_prio_c prio;
1396 + };
1397 +};
1398 +
1399 +/* CEETM Class configuration parameters */
1400 +struct dpaa2_ceetm_tc_copt {
1401 + enum dpaa2_ceetm_type type;
1402 + struct dpaa2_ceetm_shaping_cfg shaping_cfg;
1403 + __u16 shaped;
1404 + __u8 mode;
1405 + __u16 weight;
1406 +};
1407 +
1408 +/* CEETM stats */
1409 +struct dpaa2_ceetm_qdisc_stats {
1410 + __u32 drops;
1411 +};
1412 +
1413 +struct dpaa2_ceetm_class_stats {
1414 + /* Software counters */
1415 + struct gnet_stats_basic_packed bstats;
1416 + __u32 ern_drop_count;
1417 + __u32 congested_count;
1418 +};
1419 +
1420 +struct dpaa2_ceetm_tc_xstats {
1421 + __u64 ceetm_dequeue_bytes;
1422 + __u64 ceetm_dequeue_frames;
1423 + __u64 ceetm_reject_bytes;
1424 + __u64 ceetm_reject_frames;
1425 +};
1426 +
1427 +#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
1428 +int __init dpaa2_ceetm_register(void);
1429 +void __exit dpaa2_ceetm_unregister(void);
1430 +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1431 + int *qdid, u8 *qpri);
1432 +#else
1433 +static inline int dpaa2_ceetm_register(void)
1434 +{
1435 + return 0;
1436 +}
1437 +
1438 +static inline void dpaa2_ceetm_unregister(void) {}
1439 +
1440 +static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1441 + int *qdid, u8 *qpri)
1442 +{
1443 + return 0;
1444 +}
1445 +#endif
1446 +
1447 +static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
1448 +{
1449 + return priv->ceetm_en;
1450 +}
1451 +
1452 +#endif
1453 --- /dev/null
1454 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
1455 @@ -0,0 +1,356 @@
1457 +/* Copyright 2015 Freescale Semiconductor Inc.
1458 + *
1459 + * Redistribution and use in source and binary forms, with or without
1460 + * modification, are permitted provided that the following conditions are met:
1461 + * * Redistributions of source code must retain the above copyright
1462 + * notice, this list of conditions and the following disclaimer.
1463 + * * Redistributions in binary form must reproduce the above copyright
1464 + * notice, this list of conditions and the following disclaimer in the
1465 + * documentation and/or other materials provided with the distribution.
1466 + * * Neither the name of Freescale Semiconductor nor the
1467 + * names of its contributors may be used to endorse or promote products
1468 + * derived from this software without specific prior written permission.
1469 + *
1470 + *
1471 + * ALTERNATIVELY, this software may be distributed under the terms of the
1472 + * GNU General Public License ("GPL") as published by the Free Software
1473 + * Foundation, either version 2 of that License or (at your option) any
1474 + * later version.
1475 + *
1476 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1477 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1478 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1479 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1480 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1481 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1482 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1483 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1484 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1485 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1486 + */
1487 +
1488 +#include <linux/module.h>
1489 +#include <linux/debugfs.h>
1490 +#include "dpaa2-eth.h"
1491 +#include "dpaa2-eth-debugfs.h"
1492 +
1493 +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
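+/* Per-interface entries appear under <debugfs root>/dpaa2-eth/, e.g.
+ * /sys/kernel/debug/dpaa2-eth/eth0/{cpu_stats,fq_stats,ch_stats}, with eth0 a
+ * hypothetical interface name and debugfs mounted in its usual place.
+ */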
1494 +
1495 +static struct dentry *dpaa2_dbg_root;
1496 +
1497 +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
1498 +{
1499 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
1500 + struct rtnl_link_stats64 *stats;
1501 + struct dpaa2_eth_drv_stats *extras;
1502 + int i;
1503 +
1504 + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
1505 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
1506 + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
1507 + "Tx SG", "Tx realloc", "Enq busy");
1508 +
1509 + for_each_online_cpu(i) {
1510 + stats = per_cpu_ptr(priv->percpu_stats, i);
1511 + extras = per_cpu_ptr(priv->percpu_extras, i);
1512 + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
1513 + i,
1514 + stats->rx_packets,
1515 + stats->rx_errors,
1516 + extras->rx_sg_frames,
1517 + stats->tx_packets,
1518 + stats->tx_errors,
1519 + extras->tx_conf_frames,
1520 + extras->tx_sg_frames,
1521 + extras->tx_reallocs,
1522 + extras->tx_portal_busy);
1523 + }
1524 +
1525 + return 0;
1526 +}
1527 +
1528 +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
1529 +{
1530 + int err;
1531 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
1532 +
1533 + err = single_open(file, dpaa2_dbg_cpu_show, priv);
1534 + if (err < 0)
1535 + netdev_err(priv->net_dev, "single_open() failed\n");
1536 +
1537 + return err;
1538 +}
1539 +
1540 +static const struct file_operations dpaa2_dbg_cpu_ops = {
1541 + .open = dpaa2_dbg_cpu_open,
1542 + .read = seq_read,
1543 + .llseek = seq_lseek,
1544 + .release = single_release,
1545 +};
1546 +
1547 +static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
1548 +{
1549 + switch (fq->type) {
1550 + case DPAA2_RX_FQ:
1551 + return "Rx";
1552 + case DPAA2_TX_CONF_FQ:
1553 + return "Tx conf";
1554 + case DPAA2_RX_ERR_FQ:
1555 + return "Rx err";
1556 + default:
1557 + return "N/A";
1558 + }
1559 +}
1560 +
1561 +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
1562 +{
1563 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
1564 + struct dpaa2_eth_fq *fq;
1565 + u32 fcnt, bcnt;
1566 + int i, err;
1567 +
1568 + seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
1569 + seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
1570 + "VFQID", "CPU", "Traffic Class", "Type", "Frames",
1571 + "Pending frames");
1572 +
1573 + for (i = 0; i < priv->num_fqs; i++) {
1574 + fq = &priv->fq[i];
1575 + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1576 + if (err)
1577 + fcnt = 0;
1578 +
1579 +		/* There are many queues; don't display the zero-traffic ones */
1580 + if (!fq->stats.frames && !fcnt)
1581 + continue;
1582 +
1583 + seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
1584 + fq->fqid,
1585 + fq->target_cpu,
1586 + fq->tc,
1587 + fq_type_to_str(fq),
1588 + fq->stats.frames,
1589 + fcnt);
1590 + }
1591 +
1592 + return 0;
1593 +}
1594 +
1595 +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
1596 +{
1597 + int err;
1598 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
1599 +
1600 + err = single_open(file, dpaa2_dbg_fqs_show, priv);
1601 + if (err < 0)
1602 + netdev_err(priv->net_dev, "single_open() failed\n");
1603 +
1604 + return err;
1605 +}
1606 +
1607 +static const struct file_operations dpaa2_dbg_fq_ops = {
1608 + .open = dpaa2_dbg_fqs_open,
1609 + .read = seq_read,
1610 + .llseek = seq_lseek,
1611 + .release = single_release,
1612 +};
1613 +
1614 +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
1615 +{
1616 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
1617 + struct dpaa2_eth_channel *ch;
1618 + int i;
1619 +
1620 + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
1621 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
1622 + "CHID", "CPU", "Deq busy", "Frames", "CDANs",
1623 + "Avg frm/CDAN", "Buf count");
1624 +
1625 + for (i = 0; i < priv->num_channels; i++) {
1626 + ch = priv->channel[i];
1627 + seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
1628 + ch->ch_id,
1629 + ch->nctx.desired_cpu,
1630 + ch->stats.dequeue_portal_busy,
1631 + ch->stats.frames,
1632 + ch->stats.cdan,
1633 +			   ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0,
1634 + ch->buf_count);
1635 + }
1636 +
1637 + return 0;
1638 +}
1639 +
1640 +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
1641 +{
1642 + int err;
1643 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
1644 +
1645 + err = single_open(file, dpaa2_dbg_ch_show, priv);
1646 + if (err < 0)
1647 + netdev_err(priv->net_dev, "single_open() failed\n");
1648 +
1649 + return err;
1650 +}
1651 +
1652 +static const struct file_operations dpaa2_dbg_ch_ops = {
1653 + .open = dpaa2_dbg_ch_open,
1654 + .read = seq_read,
1655 + .llseek = seq_lseek,
1656 + .release = single_release,
1657 +};
1658 +
1659 +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
1660 + size_t count, loff_t *offset)
1661 +{
1662 + struct dpaa2_eth_priv *priv = file->private_data;
1663 + struct rtnl_link_stats64 *percpu_stats;
1664 + struct dpaa2_eth_drv_stats *percpu_extras;
1665 + struct dpaa2_eth_fq *fq;
1666 + struct dpaa2_eth_channel *ch;
1667 + int i;
1668 +
1669 + for_each_online_cpu(i) {
1670 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1671 + memset(percpu_stats, 0, sizeof(*percpu_stats));
1672 +
1673 + percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
1674 + memset(percpu_extras, 0, sizeof(*percpu_extras));
1675 + }
1676 +
1677 + for (i = 0; i < priv->num_fqs; i++) {
1678 + fq = &priv->fq[i];
1679 + memset(&fq->stats, 0, sizeof(fq->stats));
1680 + }
1681 +
1682 + for (i = 0; i < priv->num_channels; i++) {
1683 + ch = priv->channel[i];
1684 + memset(&ch->stats, 0, sizeof(ch->stats));
1685 + }
1686 +
1687 + return count;
1688 +}
1689 +
1690 +static const struct file_operations dpaa2_dbg_reset_ops = {
1691 + .open = simple_open,
1692 + .write = dpaa2_dbg_reset_write,
1693 +};
1694 +
1695 +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
1696 + const char __user *buf,
1697 + size_t count, loff_t *offset)
1698 +{
1699 + struct dpaa2_eth_priv *priv = file->private_data;
1700 + int err;
1701 +
1702 + err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
1703 + if (err)
1704 + netdev_err(priv->net_dev,
1705 + "dpni_reset_statistics() failed %d\n", err);
1706 +
1707 + return count;
1708 +}
1709 +
1710 +static const struct file_operations dpaa2_dbg_reset_mc_ops = {
1711 + .open = simple_open,
1712 + .write = dpaa2_dbg_reset_mc_write,
1713 +};
1714 +
1715 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
1716 +{
1717 + if (!dpaa2_dbg_root)
1718 + return;
1719 +
1720 + /* Create a directory for the interface */
1721 + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
1722 + dpaa2_dbg_root);
1723 + if (!priv->dbg.dir) {
1724 + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
1725 + return;
1726 + }
1727 +
1728 + /* per-cpu stats file */
1729 + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
1730 + priv->dbg.dir, priv,
1731 + &dpaa2_dbg_cpu_ops);
1732 + if (!priv->dbg.cpu_stats) {
1733 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1734 + goto err_cpu_stats;
1735 + }
1736 +
1737 + /* per-fq stats file */
1738 + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
1739 + priv->dbg.dir, priv,
1740 + &dpaa2_dbg_fq_ops);
1741 + if (!priv->dbg.fq_stats) {
1742 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1743 + goto err_fq_stats;
1744 + }
1745 +
1746 +	/* per-channel stats file */
1747 + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
1748 + priv->dbg.dir, priv,
1749 + &dpaa2_dbg_ch_ops);
1750 +	if (!priv->dbg.ch_stats) {
1751 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1752 + goto err_ch_stats;
1753 + }
1754 +
1755 + /* reset stats */
1756 + priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
1757 + priv->dbg.dir, priv,
1758 + &dpaa2_dbg_reset_ops);
1759 + if (!priv->dbg.reset_stats) {
1760 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1761 + goto err_reset_stats;
1762 + }
1763 +
1764 + /* reset MC stats */
1765 + priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
1766 + 0222, priv->dbg.dir, priv,
1767 + &dpaa2_dbg_reset_mc_ops);
1768 + if (!priv->dbg.reset_mc_stats) {
1769 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1770 + goto err_reset_mc_stats;
1771 + }
1772 +
1773 + return;
1774 +
1775 +err_reset_mc_stats:
1776 + debugfs_remove(priv->dbg.reset_stats);
1777 +err_reset_stats:
1778 + debugfs_remove(priv->dbg.ch_stats);
1779 +err_ch_stats:
1780 + debugfs_remove(priv->dbg.fq_stats);
1781 +err_fq_stats:
1782 + debugfs_remove(priv->dbg.cpu_stats);
1783 +err_cpu_stats:
1784 + debugfs_remove(priv->dbg.dir);
1785 +}
1786 +
1787 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
1788 +{
1789 + debugfs_remove(priv->dbg.reset_mc_stats);
1790 + debugfs_remove(priv->dbg.reset_stats);
1791 + debugfs_remove(priv->dbg.fq_stats);
1792 + debugfs_remove(priv->dbg.ch_stats);
1793 + debugfs_remove(priv->dbg.cpu_stats);
1794 + debugfs_remove(priv->dbg.dir);
1795 +}
1796 +
1797 +void dpaa2_eth_dbg_init(void)
1798 +{
1799 + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
1800 + if (!dpaa2_dbg_root) {
1801 + pr_err("DPAA2-ETH: debugfs create failed\n");
1802 + return;
1803 + }
1804 +
1805 + pr_info("DPAA2-ETH: debugfs created\n");
1806 +}
1807 +
1808 +void __exit dpaa2_eth_dbg_exit(void)
1809 +{
1810 + debugfs_remove(dpaa2_dbg_root);
1811 +}
1812 --- /dev/null
1813 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
1814 @@ -0,0 +1,60 @@
1815 +/* Copyright 2015 Freescale Semiconductor Inc.
1816 + *
1817 + * Redistribution and use in source and binary forms, with or without
1818 + * modification, are permitted provided that the following conditions are met:
1819 + * * Redistributions of source code must retain the above copyright
1820 + * notice, this list of conditions and the following disclaimer.
1821 + * * Redistributions in binary form must reproduce the above copyright
1822 + * notice, this list of conditions and the following disclaimer in the
1823 + * documentation and/or other materials provided with the distribution.
1824 + * * Neither the name of Freescale Semiconductor nor the
1825 + * names of its contributors may be used to endorse or promote products
1826 + * derived from this software without specific prior written permission.
1827 + *
1828 + *
1829 + * ALTERNATIVELY, this software may be distributed under the terms of the
1830 + * GNU General Public License ("GPL") as published by the Free Software
1831 + * Foundation, either version 2 of that License or (at your option) any
1832 + * later version.
1833 + *
1834 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1835 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1836 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1837 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1838 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1839 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1840 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1841 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1842 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1843 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1844 + */
1845 +
1846 +#ifndef DPAA2_ETH_DEBUGFS_H
1847 +#define DPAA2_ETH_DEBUGFS_H
1848 +
1849 +#include <linux/dcache.h>
1850 +
1851 +struct dpaa2_eth_priv;
1852 +
1853 +struct dpaa2_debugfs {
1854 + struct dentry *dir;
1855 + struct dentry *fq_stats;
1856 + struct dentry *ch_stats;
1857 + struct dentry *cpu_stats;
1858 + struct dentry *reset_stats;
1859 + struct dentry *reset_mc_stats;
1860 +};
1861 +
1862 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
1863 +void dpaa2_eth_dbg_init(void);
1864 +void dpaa2_eth_dbg_exit(void);
1865 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
1866 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
1867 +#else
1868 +static inline void dpaa2_eth_dbg_init(void) {}
1869 +static inline void dpaa2_eth_dbg_exit(void) {}
1870 +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
1871 +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
1872 +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
1873 +
1874 +#endif /* DPAA2_ETH_DEBUGFS_H */
1875 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1876 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1877 @@ -1,32 +1,5 @@
1878 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
1879 /* Copyright 2014-2015 Freescale Semiconductor Inc.
1880 - *
1881 - * Redistribution and use in source and binary forms, with or without
1882 - * modification, are permitted provided that the following conditions are met:
1883 - * * Redistributions of source code must retain the above copyright
1884 - * notice, this list of conditions and the following disclaimer.
1885 - * * Redistributions in binary form must reproduce the above copyright
1886 - * notice, this list of conditions and the following disclaimer in the
1887 - * documentation and/or other materials provided with the distribution.
1888 - * * Neither the name of Freescale Semiconductor nor the
1889 - * names of its contributors may be used to endorse or promote products
1890 - * derived from this software without specific prior written permission.
1891 - *
1892 - *
1893 - * ALTERNATIVELY, this software may be distributed under the terms of the
1894 - * GNU General Public License ("GPL") as published by the Free Software
1895 - * Foundation, either version 2 of that License or (at your option) any
1896 - * later version.
1897 - *
1898 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1899 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1900 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1901 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1902 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1903 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1904 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1905 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1906 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1907 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1908 */
1909
1910 #undef TRACE_SYSTEM
1911 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1912 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1913 @@ -1,33 +1,6 @@
1914 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
1915 /* Copyright 2014-2016 Freescale Semiconductor Inc.
1916 * Copyright 2016-2017 NXP
1917 - *
1918 - * Redistribution and use in source and binary forms, with or without
1919 - * modification, are permitted provided that the following conditions are met:
1920 - * * Redistributions of source code must retain the above copyright
1921 - * notice, this list of conditions and the following disclaimer.
1922 - * * Redistributions in binary form must reproduce the above copyright
1923 - * notice, this list of conditions and the following disclaimer in the
1924 - * documentation and/or other materials provided with the distribution.
1925 - * * Neither the name of Freescale Semiconductor nor the
1926 - * names of its contributors may be used to endorse or promote products
1927 - * derived from this software without specific prior written permission.
1928 - *
1929 - *
1930 - * ALTERNATIVELY, this software may be distributed under the terms of the
1931 - * GNU General Public License ("GPL") as published by the Free Software
1932 - * Foundation, either version 2 of that License or (at your option) any
1933 - * later version.
1934 - *
1935 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1936 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1937 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1938 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1939 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1940 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1941 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1942 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1943 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1944 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1945 */
1946 #include <linux/init.h>
1947 #include <linux/module.h>
1948 @@ -38,9 +11,14 @@
1949 #include <linux/msi.h>
1950 #include <linux/kthread.h>
1951 #include <linux/iommu.h>
1952 -
1953 +#include <linux/net_tstamp.h>
1954 +#include <linux/bpf.h>
1955 +#include <linux/filter.h>
1956 +#include <linux/atomic.h>
1957 +#include <net/sock.h>
1958 #include "../../fsl-mc/include/mc.h"
1959 #include "dpaa2-eth.h"
1960 +#include "dpaa2-eth-ceetm.h"
1961
1962 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
1963 * using trace events only need to #include <trace/events/sched.h>
1964 @@ -52,8 +30,6 @@ MODULE_LICENSE("Dual BSD/GPL");
1965 MODULE_AUTHOR("Freescale Semiconductor, Inc");
1966 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
1967
1968 -const char dpaa2_eth_drv_version[] = "0.1";
1969 -
1970 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
1971 dma_addr_t iova_addr)
1972 {
1973 @@ -104,26 +80,27 @@ static void free_rx_fd(struct dpaa2_eth_
1974 /* We don't support any other format */
1975 return;
1976
1977 - /* For S/G frames, we first need to free all SG entries */
1978 + /* For S/G frames, we first need to free all SG entries
1979 + * except the first one, which was taken care of already
1980 + */
1981 sgt = vaddr + dpaa2_fd_get_offset(fd);
1982 - for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1983 + for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1984 addr = dpaa2_sg_get_addr(&sgt[i]);
1985 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
1986 - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1987 - DMA_FROM_DEVICE);
1988 + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1989 + DMA_BIDIRECTIONAL);
1990
1991 - skb_free_frag(sg_vaddr);
1992 + free_pages((unsigned long)sg_vaddr, 0);
1993 if (dpaa2_sg_is_final(&sgt[i]))
1994 break;
1995 }
1996
1997 free_buf:
1998 - skb_free_frag(vaddr);
1999 + free_pages((unsigned long)vaddr, 0);
2000 }
2001
2002 /* Build a linear skb based on a single-buffer frame descriptor */
2003 -static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
2004 - struct dpaa2_eth_channel *ch,
2005 +static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
2006 const struct dpaa2_fd *fd,
2007 void *fd_vaddr)
2008 {
2009 @@ -133,8 +110,7 @@ static struct sk_buff *build_linear_skb(
2010
2011 ch->buf_count--;
2012
2013 - skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
2014 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2015 + skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
2016 if (unlikely(!skb))
2017 return NULL;
2018
2019 @@ -169,16 +145,20 @@ static struct sk_buff *build_frag_skb(st
2020 /* Get the address and length from the S/G entry */
2021 sg_addr = dpaa2_sg_get_addr(sge);
2022 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
2023 - dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
2024 - DMA_FROM_DEVICE);
2025 + dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
2026 + DMA_BIDIRECTIONAL);
2027
2028 sg_length = dpaa2_sg_get_len(sge);
2029
2030 if (i == 0) {
2031 /* We build the skb around the first data buffer */
2032 - skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
2033 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2034 + skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
2035 if (unlikely(!skb)) {
2036 + /* Free the first SG entry now, since we already
2037 + * unmapped it and obtained the virtual address
2038 + */
2039 + free_pages((unsigned long)sg_vaddr, 0);
2040 +
2041 /* We still need to subtract the buffers used
2042 * by this FD from our software counter
2043 */
2044 @@ -213,17 +193,172 @@ static struct sk_buff *build_frag_skb(st
2045 break;
2046 }
2047
2048 + WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
2049 +
2050 /* Count all data buffers + SG table buffer */
2051 ch->buf_count -= i + 2;
2052
2053 return skb;
2054 }
2055
2056 +static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
2057 + struct dpaa2_fd *fd,
2058 + void *buf_start,
2059 + u16 queue_id)
2060 +{
2061 + struct dpaa2_eth_fq *fq;
2062 + struct rtnl_link_stats64 *percpu_stats;
2063 + struct dpaa2_eth_drv_stats *percpu_extras;
2064 + struct dpaa2_faead *faead;
2065 + u32 ctrl, frc;
2066 + int i, err;
2067 +
2068 + /* Mark the egress frame annotation area as valid */
2069 + frc = dpaa2_fd_get_frc(fd);
2070 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
2071 + dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
2072 +
2073 + ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
2074 + faead = dpaa2_get_faead(buf_start, false);
2075 + faead->ctrl = cpu_to_le32(ctrl);
2076 + faead->conf_fqid = 0;
2077 +
2078 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2079 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
2080 +
2081 + fq = &priv->fq[queue_id];
2082 + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
2083 + err = priv->enqueue(priv, fq, fd, 0);
2084 + if (err != -EBUSY)
2085 + break;
2086 + }
2087 +
2088 + percpu_extras->tx_portal_busy += i;
2089 + if (unlikely(err)) {
2090 + percpu_stats->tx_errors++;
2091 + } else {
2092 + percpu_stats->tx_packets++;
2093 + percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
2094 + }
2095 +
2096 + return err;
2097 +}
2098 +
2099 +/* Free buffers acquired from the buffer pool, or buffers that were
2100 + * meant to be released back into it
2101 + */
2102 +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
2103 +{
2104 + struct device *dev = priv->net_dev->dev.parent;
2105 + void *vaddr;
2106 + int i;
2107 +
2108 + for (i = 0; i < count; i++) {
2109 + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
2110 + dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
2111 + DMA_BIDIRECTIONAL);
2112 + free_pages((unsigned long)vaddr, 0);
2113 + }
2114 +}
2115 +
2116 +static void release_fd_buf(struct dpaa2_eth_priv *priv,
2117 + struct dpaa2_eth_channel *ch,
2118 + dma_addr_t addr)
2119 +{
2120 + int err;
2121 +
2122 + ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
2123 + if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
2124 + return;
2125 +
2126 + while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
2127 + ch->rel_buf_array,
2128 + ch->rel_buf_cnt)) == -EBUSY)
2129 + cpu_relax();
2130 +
2131 + if (err)
2132 + free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
2133 +
2134 + ch->rel_buf_cnt = 0;
2135 +}
2136 +
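release_fd_buf() batches buffer releases: addresses accumulate in a per-channel array and are flushed to QBMan only once DPAA2_ETH_BUFS_PER_CMD of them are pending, amortizing the fixed cost of a release command. The same accumulate-and-flush idiom in isolation (a sketch; BATCH and flush() stand in for DPAA2_ETH_BUFS_PER_CMD and dpaa2_io_service_release()):

#define BATCH 7

static u64 pending[BATCH];
static int n_pending;

static void flush(u64 *arr, int n);	/* stand-in for the release call */

static void release_one(u64 addr)
{
	pending[n_pending++] = addr;
	if (n_pending < BATCH)
		return;
	flush(pending, n_pending);	/* submit the whole batch at once */
	n_pending = 0;
}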
2137 +static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
2138 + struct dpaa2_eth_channel *ch,
2139 + struct dpaa2_fd *fd,
2140 + u16 queue_id,
2141 + void *vaddr)
2142 +{
2143 + struct device *dev = priv->net_dev->dev.parent;
2144 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
2145 + struct rtnl_link_stats64 *percpu_stats;
2146 + struct bpf_prog *xdp_prog;
2147 + struct xdp_buff xdp;
2148 + u32 xdp_act = XDP_PASS;
2149 +
2150 + xdp_prog = READ_ONCE(ch->xdp_prog);
2151 + if (!xdp_prog)
2152 + return xdp_act;
2153 +
2154 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2155 +
2156 + xdp.data = vaddr + dpaa2_fd_get_offset(fd);
2157 + xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
2158 + /* Allow the XDP program to use the specially reserved headroom */
2159 + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
2160 +
2161 + rcu_read_lock();
2162 + xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
2163 +
2164 + /* xdp.data pointer may have changed */
2165 + dpaa2_fd_set_offset(fd, xdp.data - vaddr);
2166 + dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
2167 +
2168 + switch (xdp_act) {
2169 + case XDP_PASS:
2170 + break;
2171 + default:
2172 +		bpf_warn_invalid_xdp_action(xdp_act); /* fall through */
2173 + case XDP_ABORTED:
2174 + case XDP_DROP:
2175 + /* This is our buffer, so we can release it back to hardware */
2176 + release_fd_buf(priv, ch, addr);
2177 + percpu_stats->rx_dropped++;
2178 + break;
2179 + case XDP_TX:
2180 + if (dpaa2_eth_xdp_tx(priv, fd, vaddr, queue_id)) {
2181 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2182 + DMA_BIDIRECTIONAL);
2183 + free_rx_fd(priv, fd, vaddr);
2184 + ch->buf_count--;
2185 + }
2186 + break;
2187 + case XDP_REDIRECT:
2188 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2189 + DMA_BIDIRECTIONAL);
2190 + ch->buf_count--;
2191 + ch->flush = true;
2192 + /* Mark the actual start of the data buffer */
2193 + xdp.data_hard_start = vaddr;
2194 + if (xdp_do_redirect(priv->net_dev, &xdp, xdp_prog))
2195 + free_rx_fd(priv, fd, vaddr);
2196 + break;
2197 + }
2198 +
2199 + if (xdp_act == XDP_TX || xdp_act == XDP_REDIRECT) {
2200 + percpu_stats->rx_packets++;
2201 + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
2202 + }
2203 +
2204 + rcu_read_unlock();
2205 +
2206 + return xdp_act;
2207 +}
2208 +
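For a concrete picture of what dpaa2_eth_run_xdp() executes, here is a minimal XDP program covering the XDP_PASS/XDP_DROP verdicts handled above (a sketch, not part of this patch; SEC() and bpf_htons() come from libbpf helper headers, whose exact paths vary by toolchain):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_drop_non_ip(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	/* The verifier requires an explicit bounds check before access */
	if ((void *)(eth + 1) > data_end)
		return XDP_ABORTED;

	/* Drop everything that is not IPv4, pass the rest to the stack */
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";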
2209 /* Main Rx frame processing routine */
2210 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
2211 struct dpaa2_eth_channel *ch,
2212 const struct dpaa2_fd *fd,
2213 - struct napi_struct *napi)
2214 + struct dpaa2_eth_fq *fq)
2215 {
2216 dma_addr_t addr = dpaa2_fd_get_addr(fd);
2217 u8 fd_format = dpaa2_fd_get_format(fd);
2218 @@ -235,14 +370,16 @@ static void dpaa2_eth_rx(struct dpaa2_et
2219 struct dpaa2_fas *fas;
2220 void *buf_data;
2221 u32 status = 0;
2222 + u32 xdp_act;
2223
2224 /* Tracing point */
2225 trace_dpaa2_rx_fd(priv->net_dev, fd);
2226
2227 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
2228 - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
2229 + dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2230 + DMA_BIDIRECTIONAL);
2231
2232 - fas = dpaa2_get_fas(vaddr);
2233 + fas = dpaa2_get_fas(vaddr, false);
2234 prefetch(fas);
2235 buf_data = vaddr + dpaa2_fd_get_offset(fd);
2236 prefetch(buf_data);
2237 @@ -251,22 +388,43 @@ static void dpaa2_eth_rx(struct dpaa2_et
2238 percpu_extras = this_cpu_ptr(priv->percpu_extras);
2239
2240 if (fd_format == dpaa2_fd_single) {
2241 - skb = build_linear_skb(priv, ch, fd, vaddr);
2242 + xdp_act = dpaa2_eth_run_xdp(priv, ch, (struct dpaa2_fd *)fd,
2243 + fq->flowid, vaddr);
2244 + if (xdp_act != XDP_PASS)
2245 + return;
2246 +
2247 + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2248 + DMA_BIDIRECTIONAL);
2249 + skb = build_linear_skb(ch, fd, vaddr);
2250 } else if (fd_format == dpaa2_fd_sg) {
2251 + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2252 + DMA_BIDIRECTIONAL);
2253 skb = build_frag_skb(priv, ch, buf_data);
2254 - skb_free_frag(vaddr);
2255 + free_pages((unsigned long)vaddr, 0);
2256 percpu_extras->rx_sg_frames++;
2257 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
2258 } else {
2259 /* We don't support any other format */
2260 - goto err_frame_format;
2261 + goto drop_cnt;
2262 }
2263
2264 if (unlikely(!skb))
2265 - goto err_build_skb;
2266 + goto drop_fd;
2267
2268 prefetch(skb->data);
2269
2270 + /* Get the timestamp value */
2271 + if (priv->ts_rx_en) {
2272 + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2273 + __le64 *ts = dpaa2_get_ts(vaddr, false);
2274 + u64 ns;
2275 +
2276 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2277 +
2278 + ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
2279 + shhwtstamps->hwtstamp = ns_to_ktime(ns);
2280 + }
2281 +
2282 /* Check if we need to validate the L4 csum */
2283 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
2284 status = le32_to_cpu(fas->status);
2285 @@ -274,30 +432,80 @@ static void dpaa2_eth_rx(struct dpaa2_et
2286 }
2287
2288 skb->protocol = eth_type_trans(skb, priv->net_dev);
2289 + skb_record_rx_queue(skb, fq->flowid);
2290
2291 percpu_stats->rx_packets++;
2292 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
2293
2294 - napi_gro_receive(napi, skb);
2295 + napi_gro_receive(&ch->napi, skb);
2296
2297 return;
2298
2299 -err_build_skb:
2300 +drop_fd:
2301 free_rx_fd(priv, fd, vaddr);
2302 -err_frame_format:
2303 +drop_cnt:
2304 percpu_stats->rx_dropped++;
2305 }
2306
2307 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
2308 +/* Processing of Rx frames received on the error FQ
2309 + * We check and print the error bits and then free the frame
2310 + */
2311 +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
2312 + struct dpaa2_eth_channel *ch,
2313 + const struct dpaa2_fd *fd,
2314 + struct napi_struct *napi __always_unused,
2315 + u16 queue_id __always_unused)
2316 +{
2317 + struct device *dev = priv->net_dev->dev.parent;
2318 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
2319 + void *vaddr;
2320 + struct rtnl_link_stats64 *percpu_stats;
2321 + struct dpaa2_fas *fas;
2322 + u32 status = 0;
2323 + u32 fd_errors;
2324 + bool has_fas_errors = false;
2325 +
2326 + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
2327 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
2328 +
2329 + /* check frame errors in the FD field */
2330 + fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
2331 + if (likely(fd_errors)) {
2332 + has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
2333 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2334 + if (net_ratelimit())
2335 + netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n",
2336 + fd_errors);
2337 + }
2338 +
2339 + /* check frame errors in the FAS field */
2340 + if (has_fas_errors) {
2341 + fas = dpaa2_get_fas(vaddr, false);
2342 + status = le32_to_cpu(fas->status);
2343 + if (net_ratelimit())
2344 + netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
2345 + status & DPAA2_FAS_RX_ERR_MASK);
2346 + }
2347 + free_rx_fd(priv, fd, vaddr);
2348 +
2349 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2350 + percpu_stats->rx_errors++;
2351 + ch->buf_count--;
2352 +}
2353 +#endif
2354 +
2355 /* Consume all frames pull-dequeued into the store. This is the simplest way to
2356 * make sure we don't accidentally issue another volatile dequeue which would
2357 * overwrite (leak) frames already in the store.
2358 *
2359 * Observance of NAPI budget is not our concern, leaving that to the caller.
2360 */
2361 -static int consume_frames(struct dpaa2_eth_channel *ch)
2362 +static int consume_frames(struct dpaa2_eth_channel *ch,
2363 + struct dpaa2_eth_fq **src)
2364 {
2365 struct dpaa2_eth_priv *priv = ch->priv;
2366 - struct dpaa2_eth_fq *fq;
2367 + struct dpaa2_eth_fq *fq = NULL;
2368 struct dpaa2_dq *dq;
2369 const struct dpaa2_fd *fd;
2370 int cleaned = 0;
2371 @@ -315,16 +523,51 @@ static int consume_frames(struct dpaa2_e
2372 }
2373
2374 fd = dpaa2_dq_fd(dq);
2375 + prefetch(fd);
2376 +
2377 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
2378 - fq->stats.frames++;
2379
2380 - fq->consume(priv, ch, fd, &ch->napi);
2381 + fq->consume(priv, ch, fd, fq);
2382 cleaned++;
2383 } while (!is_last);
2384
2385 + if (!cleaned)
2386 + return 0;
2387 +
2388 + fq->stats.frames += cleaned;
2389 + ch->stats.frames += cleaned;
2390 +
2391 + /* A dequeue operation only pulls frames from a single queue
2392 + * into the store. Return the frame queue as an out param.
2393 + */
2394 + if (src)
2395 + *src = fq;
2396 +
2397 return cleaned;
2398 }
2399
2400 +/* Configure the egress frame annotation for timestamp update */
2401 +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
2402 +{
2403 + struct dpaa2_faead *faead;
2404 + u32 ctrl, frc;
2405 +
2406 + /* Mark the egress frame annotation area as valid */
2407 + frc = dpaa2_fd_get_frc(fd);
2408 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
2409 +
2410 + /* Set hardware annotation size */
2411 + ctrl = dpaa2_fd_get_ctrl(fd);
2412 + dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
2413 +
2414 +	/* enable UPD (update prepended data) bit in FAEAD field of
2415 + * hardware frame annotation area
2416 + */
2417 + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
2418 + faead = dpaa2_get_faead(buf_start, true);
2419 + faead->ctrl = cpu_to_le32(ctrl);
2420 +}
2421 +
2422 /* Create a frame descriptor based on a fragmented skb */
2423 static int build_sg_fd(struct dpaa2_eth_priv *priv,
2424 struct sk_buff *skb,
2425 @@ -341,7 +584,6 @@ static int build_sg_fd(struct dpaa2_eth_
2426 int num_sg;
2427 int num_dma_bufs;
2428 struct dpaa2_eth_swa *swa;
2429 - struct dpaa2_fas *fas;
2430
2431 /* Create and map scatterlist.
2432 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
2433 @@ -365,21 +607,14 @@ static int build_sg_fd(struct dpaa2_eth_
2434
2435 /* Prepare the HW SGT structure */
2436 sgt_buf_size = priv->tx_data_offset +
2437 - sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
2438 - sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
2439 + sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
2440 + sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
2441 if (unlikely(!sgt_buf)) {
2442 err = -ENOMEM;
2443 goto sgt_buf_alloc_failed;
2444 }
2445 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
2446 -
2447 - /* PTA from egress side is passed as is to the confirmation side so
2448 - * we need to clear some fields here in order to find consistent values
2449 - * on TX confirmation. We are clearing FAS (Frame Annotation Status)
2450 - * field from the hardware annotation area
2451 - */
2452 - fas = dpaa2_get_fas(sgt_buf);
2453 - memset(fas, 0, DPAA2_FAS_SIZE);
2454 + memset(sgt_buf, 0, sgt_buf_size);
2455
2456 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
2457
2458 @@ -402,10 +637,11 @@ static int build_sg_fd(struct dpaa2_eth_
2459 * all of them on Tx Conf.
2460 */
2461 swa = (struct dpaa2_eth_swa *)sgt_buf;
2462 - swa->skb = skb;
2463 - swa->scl = scl;
2464 - swa->num_sg = num_sg;
2465 - swa->num_dma_bufs = num_dma_bufs;
2466 + swa->type = DPAA2_ETH_SWA_SG;
2467 + swa->sg.skb = skb;
2468 + swa->sg.scl = scl;
2469 + swa->sg.num_sg = num_sg;
2470 + swa->sg.sgt_size = sgt_buf_size;
2471
2472 /* Separately map the SGT buffer */
2473 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
2474 @@ -417,13 +653,15 @@ static int build_sg_fd(struct dpaa2_eth_
2475 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
2476 dpaa2_fd_set_addr(fd, addr);
2477 dpaa2_fd_set_len(fd, skb->len);
2478 - dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
2479 - DPAA2_FD_CTRL_PTV1);
2480 + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2481 +
2482 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
2483 + enable_tx_tstamp(fd, sgt_buf);
2484
2485 return 0;
2486
2487 dma_map_single_failed:
2488 - kfree(sgt_buf);
2489 + skb_free_frag(sgt_buf);
2490 sgt_buf_alloc_failed:
2491 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
2492 dma_map_sg_failed:
2493 @@ -437,29 +675,27 @@ static int build_single_fd(struct dpaa2_
2494 struct dpaa2_fd *fd)
2495 {
2496 struct device *dev = priv->net_dev->dev.parent;
2497 - u8 *buffer_start;
2498 - struct dpaa2_fas *fas;
2499 - struct sk_buff **skbh;
2500 + u8 *buffer_start, *aligned_start;
2501 + struct dpaa2_eth_swa *swa;
2502 dma_addr_t addr;
2503
2504 - buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
2505 - DPAA2_ETH_TX_BUF_ALIGN,
2506 - DPAA2_ETH_TX_BUF_ALIGN);
2507 -
2508 - /* PTA from egress side is passed as is to the confirmation side so
2509 - * we need to clear some fields here in order to find consistent values
2510 - * on TX confirmation. We are clearing FAS (Frame Annotation Status)
2511 - * field from the hardware annotation area
2512 + buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
2513 +
2514 + /* If there's enough room to align the FD address, do it.
2515 + * It will help hardware optimize accesses.
2516 */
2517 - fas = dpaa2_get_fas(buffer_start);
2518 - memset(fas, 0, DPAA2_FAS_SIZE);
2519 + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2520 + DPAA2_ETH_TX_BUF_ALIGN);
2521 + if (aligned_start >= skb->head)
2522 + buffer_start = aligned_start;
2523
2524 /* Store a backpointer to the skb at the beginning of the buffer
2525 * (in the private data area) such that we can release it
2526 * on Tx confirm
2527 */
2528 - skbh = (struct sk_buff **)buffer_start;
2529 - *skbh = skb;
2530 + swa = (struct dpaa2_eth_swa *)buffer_start;
2531 + swa->type = DPAA2_ETH_SWA_SINGLE;
2532 + swa->single.skb = skb;
2533
2534 addr = dma_map_single(dev, buffer_start,
2535 skb_tail_pointer(skb) - buffer_start,
2536 @@ -471,8 +707,10 @@ static int build_single_fd(struct dpaa2_
2537 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
2538 dpaa2_fd_set_len(fd, skb->len);
2539 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2540 - dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
2541 - DPAA2_FD_CTRL_PTV1);
2542 + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2543 +
2544 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
2545 + enable_tx_tstamp(fd, buffer_start);
2546
2547 return 0;
2548 }
2549 @@ -483,72 +721,75 @@ static int build_single_fd(struct dpaa2_
2550 * back-pointed to is also freed.
2551 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
2552 * dpaa2_eth_tx().
2553 - * Optionally, return the frame annotation status word (FAS), which needs
2554 - * to be checked if we're on the confirmation path.
2555 */
2556 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
2557 - const struct dpaa2_fd *fd,
2558 - u32 *status)
2559 + const struct dpaa2_fd *fd, bool in_napi)
2560 {
2561 struct device *dev = priv->net_dev->dev.parent;
2562 dma_addr_t fd_addr;
2563 - struct sk_buff **skbh, *skb;
2564 + struct sk_buff *skb = NULL;
2565 unsigned char *buffer_start;
2566 - int unmap_size;
2567 - struct scatterlist *scl;
2568 - int num_sg, num_dma_bufs;
2569 struct dpaa2_eth_swa *swa;
2570 u8 fd_format = dpaa2_fd_get_format(fd);
2571 - struct dpaa2_fas *fas;
2572
2573 fd_addr = dpaa2_fd_get_addr(fd);
2574 - skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
2575 - fas = dpaa2_get_fas(skbh);
2576 + buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
2577 + swa = (struct dpaa2_eth_swa *)buffer_start;
2578
2579 if (fd_format == dpaa2_fd_single) {
2580 - skb = *skbh;
2581 - buffer_start = (unsigned char *)skbh;
2582 - /* Accessing the skb buffer is safe before dma unmap, because
2583 - * we didn't map the actual skb shell.
2584 - */
2585 - dma_unmap_single(dev, fd_addr,
2586 - skb_tail_pointer(skb) - buffer_start,
2587 - DMA_BIDIRECTIONAL);
2588 + if (swa->type == DPAA2_ETH_SWA_SINGLE) {
2589 + skb = swa->single.skb;
2590 + /* Accessing the skb buffer is safe before dma unmap,
2591 + * because we didn't map the actual skb shell.
2592 + */
2593 + dma_unmap_single(dev, fd_addr,
2594 + skb_tail_pointer(skb) - buffer_start,
2595 + DMA_BIDIRECTIONAL);
2596 + } else {
2597 + WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP,
2598 + "Wrong SWA type");
2599 + dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
2600 + DMA_BIDIRECTIONAL);
2601 + }
2602 } else if (fd_format == dpaa2_fd_sg) {
2603 - swa = (struct dpaa2_eth_swa *)skbh;
2604 - skb = swa->skb;
2605 - scl = swa->scl;
2606 - num_sg = swa->num_sg;
2607 - num_dma_bufs = swa->num_dma_bufs;
2608 + skb = swa->sg.skb;
2609
2610 /* Unmap the scatterlist */
2611 - dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
2612 - kfree(scl);
2613 + dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
2614 + kfree(swa->sg.scl);
2615
2616 /* Unmap the SGT buffer */
2617 - unmap_size = priv->tx_data_offset +
2618 - sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
2619 - dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
2620 + dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
2621 + DMA_BIDIRECTIONAL);
2622 } else {
2623 - /* Unsupported format, mark it as errored and give up */
2624 - if (status)
2625 - *status = ~0;
2626 + netdev_dbg(priv->net_dev, "Invalid FD format\n");
2627 return;
2628 }
2629
2630 - /* Read the status from the Frame Annotation after we unmap the first
2631 - * buffer but before we free it. The caller function is responsible
2632 - * for checking the status value.
2633 - */
2634 - if (status)
2635 - *status = le32_to_cpu(fas->status);
2636 + if (swa->type == DPAA2_ETH_SWA_XDP) {
2637 + page_frag_free(buffer_start);
2638 + return;
2639 + }
2640 +
2641 + /* Get the timestamp value */
2642 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2643 + struct skb_shared_hwtstamps shhwtstamps;
2644 + __le64 *ts = dpaa2_get_ts(buffer_start, true);
2645 + u64 ns;
2646 +
2647 + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2648 +
2649 + ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
2650 + shhwtstamps.hwtstamp = ns_to_ktime(ns);
2651 + skb_tstamp_tx(skb, &shhwtstamps);
2652 + }
2653
2654 - /* Free SGT buffer kmalloc'ed on tx */
2655 + /* Free SGT buffer allocated on tx */
2656 if (fd_format != dpaa2_fd_single)
2657 - kfree(skbh);
2658 + skb_free_frag(buffer_start);
2659
2660 /* Move on with skb release */
2661 - dev_kfree_skb(skb);
2662 + napi_consume_skb(skb, in_napi);
2663 }
2664
2665 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
2666 @@ -558,20 +799,41 @@ static netdev_tx_t dpaa2_eth_tx(struct s
2667 struct rtnl_link_stats64 *percpu_stats;
2668 struct dpaa2_eth_drv_stats *percpu_extras;
2669 struct dpaa2_eth_fq *fq;
2670 + struct netdev_queue *nq;
2671 u16 queue_mapping;
2672 - int err, i;
2673 + unsigned int needed_headroom;
2674 + u32 fd_len;
2675 + u8 prio;
2676 + int err, i, ch_id = 0;
2677 +
2678 + queue_mapping = skb_get_queue_mapping(skb);
2679 + prio = netdev_txq_to_tc(net_dev, queue_mapping);
2680 + /* Hardware interprets priority level 0 as being the highest,
2681 + * so we need to do a reverse mapping to the netdev tc index
2682 + */
2683 + if (net_dev->num_tc)
2684 + prio = net_dev->num_tc - prio - 1;
2685 +
2686 + queue_mapping %= dpaa2_eth_queue_count(priv);
2687 + fq = &priv->fq[queue_mapping];
2688
2689 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2690 percpu_extras = this_cpu_ptr(priv->percpu_extras);
2691
2692 - if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
2693 + needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
2694 + if (skb_headroom(skb) < needed_headroom) {
2695 struct sk_buff *ns;
2696
2697 - ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
2698 + ns = skb_realloc_headroom(skb, needed_headroom);
2699 if (unlikely(!ns)) {
2700 percpu_stats->tx_dropped++;
2701 goto err_alloc_headroom;
2702 }
2703 + percpu_extras->tx_reallocs++;
2704 +
2705 + if (skb->sk)
2706 + skb_set_owner_w(ns, skb->sk);
2707 +
2708 dev_kfree_skb(skb);
2709 skb = ns;
2710 }
2711 @@ -602,17 +864,24 @@ static netdev_tx_t dpaa2_eth_tx(struct s
2712 goto err_build_fd;
2713 }
2714
2715 + if (dpaa2_eth_ceetm_is_enabled(priv)) {
2716 + err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &prio);
2717 + if (err)
2718 + goto err_ceetm_classify;
2719 + }
2720 +
2721 /* Tracing point */
2722 trace_dpaa2_tx_fd(net_dev, &fd);
2723
2724 - /* TxConf FQ selection primarily based on cpu affinity; this is
2725 - * non-migratable context, so it's safe to call smp_processor_id().
2726 + fd_len = dpaa2_fd_get_len(&fd);
2727 + nq = netdev_get_tx_queue(net_dev, queue_mapping);
2728 + netdev_tx_sent_queue(nq, fd_len);
2729 +
2730 +	/* Everything that happens after this enqueue might race with
2731 + * the Tx confirmation callback for this frame
2732 */
2733 - queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
2734 - fq = &priv->fq[queue_mapping];
2735 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
2736 - err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
2737 - fq->tx_qdbin, &fd);
2738 + err = priv->enqueue(priv, fq, &fd, 0);
2739 if (err != -EBUSY)
2740 break;
2741 }
2742 @@ -620,14 +889,17 @@ static netdev_tx_t dpaa2_eth_tx(struct s
2743 if (unlikely(err < 0)) {
2744 percpu_stats->tx_errors++;
2745 /* Clean up everything, including freeing the skb */
2746 - free_tx_fd(priv, &fd, NULL);
2747 + free_tx_fd(priv, &fd, false);
2748 + netdev_tx_completed_queue(nq, 1, fd_len);
2749 } else {
2750 percpu_stats->tx_packets++;
2751 - percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
2752 + percpu_stats->tx_bytes += fd_len;
2753 }
2754
2755 return NETDEV_TX_OK;
2756
2757 +err_ceetm_classify:
2758 + free_tx_fd(priv, &fd, false);
2759 err_build_fd:
2760 err_alloc_headroom:
2761 dev_kfree_skb(skb);
2762 @@ -637,48 +909,39 @@ err_alloc_headroom:
2763
2764 /* Tx confirmation frame processing routine */
2765 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
2766 - struct dpaa2_eth_channel *ch,
2767 + struct dpaa2_eth_channel *ch __always_unused,
2768 const struct dpaa2_fd *fd,
2769 - struct napi_struct *napi __always_unused)
2770 + struct dpaa2_eth_fq *fq)
2771 {
2772 struct rtnl_link_stats64 *percpu_stats;
2773 struct dpaa2_eth_drv_stats *percpu_extras;
2774 - u32 status = 0;
2775 + u32 fd_len = dpaa2_fd_get_len(fd);
2776 u32 fd_errors;
2777 - bool has_fas_errors = false;
2778
2779 /* Tracing point */
2780 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
2781
2782 percpu_extras = this_cpu_ptr(priv->percpu_extras);
2783 percpu_extras->tx_conf_frames++;
2784 - percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
2785 + percpu_extras->tx_conf_bytes += fd_len;
2786 +
2787 + fq->dq_frames++;
2788 + fq->dq_bytes += fd_len;
2789
2790 /* Check frame errors in the FD field */
2791 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
2792 - if (unlikely(fd_errors)) {
2793 - /* We only check error bits in the FAS field if corresponding
2794 - * FAERR bit is set in FD and the FAS field is marked as valid
2795 - */
2796 - has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
2797 - !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2798 - if (net_ratelimit())
2799 - netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
2800 - fd_errors);
2801 - }
2802 -
2803 - free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
2804 + free_tx_fd(priv, fd, true);
2805
2806 if (likely(!fd_errors))
2807 return;
2808
2809 + if (net_ratelimit())
2810 + netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
2811 + fd_errors);
2812 +
2813 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2814 /* Tx-conf logically pertains to the egress path. */
2815 percpu_stats->tx_errors++;
2816 -
2817 - if (has_fas_errors && net_ratelimit())
2818 - netdev_dbg(priv->net_dev, "TX frame FAS error: 0x%08x\n",
2819 - status & DPAA2_FAS_TX_ERR_MASK);
2820 }
2821
2822 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
2823 @@ -728,26 +991,29 @@ static int set_tx_csum(struct dpaa2_eth_
2824 /* Perform a single release command to add buffers
2825 * to the specified buffer pool
2826 */
2827 -static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
2828 +static int add_bufs(struct dpaa2_eth_priv *priv,
2829 + struct dpaa2_eth_channel *ch, u16 bpid)
2830 {
2831 struct device *dev = priv->net_dev->dev.parent;
2832 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2833 - void *buf;
2834 + struct page *page;
2835 dma_addr_t addr;
2836 - int i;
2837 + int i, err;
2838
2839 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
2840 /* Allocate buffer visible to WRIOP + skb shared info +
2841 * alignment padding
2842 */
2843 - buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
2844 - if (unlikely(!buf))
2845 + /* allocate one page for each Rx buffer. WRIOP sees
2846 + * the entire page except for a tailroom reserved for
2847 + * skb shared info
2848 + */
2849 + page = dev_alloc_pages(0);
2850 + if (!page)
2851 goto err_alloc;
2852
2853 - buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
2854 -
2855 - addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
2856 - DMA_FROM_DEVICE);
2857 + addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
2858 + DMA_BIDIRECTIONAL);
2859 if (unlikely(dma_mapping_error(dev, addr)))
2860 goto err_map;
2861
2862 @@ -755,28 +1021,33 @@ static int add_bufs(struct dpaa2_eth_pri
2863
2864 /* tracing point */
2865 trace_dpaa2_eth_buf_seed(priv->net_dev,
2866 - buf, DPAA2_ETH_BUF_RAW_SIZE,
2867 + page, DPAA2_ETH_RX_BUF_RAW_SIZE,
2868 addr, DPAA2_ETH_RX_BUF_SIZE,
2869 bpid);
2870 }
2871
2872 release_bufs:
2873 - /* In case the portal is busy, retry until successful.
2874 - * The buffer release function would only fail if the QBMan portal
2875 - * was busy, which implies portal contention (i.e. more CPUs than
2876 - * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
2877 - * there is little we can realistically do, short of giving up -
2878 - * in which case we'd risk depleting the buffer pool and never again
2879 - * receiving the Rx interrupt which would kick-start the refill logic.
2880 - * So just keep retrying, at the risk of being moved to ksoftirqd.
2881 - */
2882 - while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
2883 + /* In case the portal is busy, retry until successful */
2884 + while ((err = dpaa2_io_service_release(ch->dpio, bpid,
2885 + buf_array, i)) == -EBUSY)
2886 cpu_relax();
2887 +
2888 + /* If release command failed, clean up and bail out;
2889 + * not much else we can do about it
2890 + */
2891 + if (err) {
2892 + free_bufs(priv, buf_array, i);
2893 + return 0;
2894 + }
2895 +
2896 return i;
2897
2898 err_map:
2899 - skb_free_frag(buf);
2900 + __free_pages(page, 0);
2901 err_alloc:
2902 + /* If we managed to allocate at least some buffers,
2903 + * release them to hardware
2904 + */
2905 if (i)
2906 goto release_bufs;
2907
2908 @@ -796,9 +1067,10 @@ static int seed_pool(struct dpaa2_eth_pr
2909 */
2910 preempt_disable();
2911 for (j = 0; j < priv->num_channels; j++) {
2912 - for (i = 0; i < DPAA2_ETH_NUM_BUFS;
2913 + priv->channel[j]->buf_count = 0;
2914 + for (i = 0; i < priv->max_bufs_per_ch;
2915 i += DPAA2_ETH_BUFS_PER_CMD) {
2916 - new_count = add_bufs(priv, bpid);
2917 + new_count = add_bufs(priv, priv->channel[j], bpid);
2918 priv->channel[j]->buf_count += new_count;
2919
2920 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
2921 @@ -818,10 +1090,8 @@ static int seed_pool(struct dpaa2_eth_pr
2922 */
2923 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
2924 {
2925 - struct device *dev = priv->net_dev->dev.parent;
2926 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2927 - void *vaddr;
2928 - int ret, i;
2929 + int ret;
2930
2931 do {
2932 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
2933 @@ -830,27 +1100,16 @@ static void drain_bufs(struct dpaa2_eth_
2934 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
2935 return;
2936 }
2937 - for (i = 0; i < ret; i++) {
2938 - /* Same logic as on regular Rx path */
2939 - vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
2940 - buf_array[i]);
2941 - dma_unmap_single(dev, buf_array[i],
2942 - DPAA2_ETH_RX_BUF_SIZE,
2943 - DMA_FROM_DEVICE);
2944 - skb_free_frag(vaddr);
2945 - }
2946 + free_bufs(priv, buf_array, ret);
2947 } while (ret);
2948 }
2949
2950 static void drain_pool(struct dpaa2_eth_priv *priv)
2951 {
2952 - int i;
2953 -
2954 + preempt_disable();
2955 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
2956 drain_bufs(priv, 1);
2957 -
2958 - for (i = 0; i < priv->num_channels; i++)
2959 - priv->channel[i]->buf_count = 0;
2960 + preempt_enable();
2961 }
2962
2963 /* Function is called from softirq context only, so we don't need to guard
2964 @@ -862,19 +1121,19 @@ static int refill_pool(struct dpaa2_eth_
2965 {
2966 int new_count;
2967
2968 - if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
2969 + if (likely(ch->buf_count >= priv->refill_thresh))
2970 return 0;
2971
2972 do {
2973 - new_count = add_bufs(priv, bpid);
2974 + new_count = add_bufs(priv, ch, bpid);
2975 if (unlikely(!new_count)) {
2976 /* Out of memory; abort for now, we'll try later on */
2977 break;
2978 }
2979 ch->buf_count += new_count;
2980 - } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
2981 + } while (ch->buf_count < priv->max_bufs_per_ch);
2982
2983 - if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
2984 + if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
2985 return -ENOMEM;
2986
2987 return 0;
2988 @@ -887,7 +1146,8 @@ static int pull_channel(struct dpaa2_eth
2989
2990 /* Retry while portal is busy */
2991 do {
2992 - err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
2993 + err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
2994 + ch->store);
2995 dequeues++;
2996 cpu_relax();
2997 } while (err == -EBUSY);
2998 @@ -908,14 +1168,17 @@ static int pull_channel(struct dpaa2_eth
2999 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
3000 {
3001 struct dpaa2_eth_channel *ch;
3002 - int cleaned = 0, store_cleaned;
3003 struct dpaa2_eth_priv *priv;
3004 + int rx_cleaned = 0, txconf_cleaned = 0;
3005 + struct dpaa2_eth_fq *fq, *txc_fq = NULL;
3006 + struct netdev_queue *nq;
3007 + int store_cleaned, work_done;
3008 int err;
3009
3010 ch = container_of(napi, struct dpaa2_eth_channel, napi);
3011 priv = ch->priv;
3012
3013 - while (cleaned < budget) {
3014 + do {
3015 err = pull_channel(ch);
3016 if (unlikely(err))
3017 break;
3018 @@ -923,29 +1186,56 @@ static int dpaa2_eth_poll(struct napi_st
3019 /* Refill pool if appropriate */
3020 refill_pool(priv, ch, priv->bpid);
3021
3022 - store_cleaned = consume_frames(ch);
3023 - cleaned += store_cleaned;
3024 + store_cleaned = consume_frames(ch, &fq);
3025 + if (!store_cleaned)
3026 + break;
3027 + if (fq->type == DPAA2_RX_FQ) {
3028 + rx_cleaned += store_cleaned;
3029 + /* If these are XDP_REDIRECT frames, flush them now */
3030 + /* TODO: Do we need this? */
3031 + if (ch->flush) {
3032 + xdp_do_flush_map();
3033 + ch->flush = false;
3034 + }
3035 + } else {
3036 + txconf_cleaned += store_cleaned;
3037 + /* We have a single Tx conf FQ on this channel */
3038 + txc_fq = fq;
3039 + }
3040
3041 - /* If we have enough budget left for a full store,
3042 - * try a new pull dequeue, otherwise we're done here
3043 + /* If we either consumed the whole NAPI budget with Rx frames
3044 + * or we reached the Tx confirmations threshold, we're done.
3045 */
3046 - if (store_cleaned == 0 ||
3047 - cleaned > budget - DPAA2_ETH_STORE_SIZE)
3048 - break;
3049 - }
3050 + if (rx_cleaned >= budget ||
3051 + txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
3052 + work_done = budget;
3053 + goto out;
3054 + }
3055 + } while (store_cleaned);
3056
3057 - if (cleaned < budget) {
3058 - napi_complete_done(napi, cleaned);
3059 - /* Re-enable data available notifications */
3060 - do {
3061 - err = dpaa2_io_service_rearm(NULL, &ch->nctx);
3062 - cpu_relax();
3063 - } while (err == -EBUSY);
3064 - }
3065 + /* We didn't consume the entire budget, so finish napi and
3066 + * re-enable data availability notifications
3067 + */
3068 + napi_complete_done(napi, rx_cleaned);
3069 + do {
3070 + err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
3071 + cpu_relax();
3072 + } while (err == -EBUSY);
3073 + WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
3074 + ch->nctx.desired_cpu);
3075
3076 - ch->stats.frames += cleaned;
3077 + work_done = max(rx_cleaned, 1);
3078
3079 - return cleaned;
3080 +out:
3081 + if (txc_fq) {
3082 + nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
3083 + netdev_tx_completed_queue(nq, txc_fq->dq_frames,
3084 + txc_fq->dq_bytes);
3085 + txc_fq->dq_frames = 0;
3086 + txc_fq->dq_bytes = 0;
3087 + }
3088 +
3089 + return work_done;
3090 }
3091
3092 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
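The txc_fq accounting above closes the Byte Queue Limits (BQL) loop: every byte reported through netdev_tx_sent_queue() on the transmit path must eventually be reported back through netdev_tx_completed_queue(), either from the Tx confirmation path (batched per NAPI poll, as here) or from the enqueue error path. The pairing in condensed form (a sketch; hw_enqueue() stands in for priv->enqueue()):

static int hw_enqueue(struct sk_buff *skb);	/* stand-in */

static netdev_tx_t bql_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *nq;
	unsigned int len = skb->len;

	nq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(nq, len);		/* account before enqueue */
	if (hw_enqueue(skb) < 0) {
		netdev_tx_completed_queue(nq, 1, len);	/* undo on failure */
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}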
3093 @@ -970,9 +1260,23 @@ static void disable_ch_napi(struct dpaa2
3094 }
3095 }
3096
3097 +static void update_tx_fqids(struct dpaa2_eth_priv *priv);
3098 +
3099 +static void update_pf(struct dpaa2_eth_priv *priv,
3100 + struct dpni_link_state *state)
3101 +{
3102 + bool pause_frames;
3103 +
3104 + pause_frames = !!(state->options & DPNI_LINK_OPT_PAUSE);
3105 + if (priv->tx_pause_frames != pause_frames) {
3106 + priv->tx_pause_frames = pause_frames;
3107 + set_rx_taildrop(priv);
3108 + }
3109 +}
3110 +
3111 static int link_state_update(struct dpaa2_eth_priv *priv)
3112 {
3113 - struct dpni_link_state state;
3114 + struct dpni_link_state state = {0};
3115 int err;
3116
3117 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
3118 @@ -988,6 +1292,8 @@ static int link_state_update(struct dpaa
3119
3120 priv->link_state = state;
3121 if (state.up) {
3122 + update_tx_fqids(priv);
3123 + update_pf(priv, &state);
3124 netif_carrier_on(priv->net_dev);
3125 netif_tx_start_all_queues(priv->net_dev);
3126 } else {
3127 @@ -1006,28 +1312,30 @@ static int dpaa2_eth_open(struct net_dev
3128 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3129 int err;
3130
3131 - err = seed_pool(priv, priv->bpid);
3132 - if (err) {
3133 - /* Not much to do; the buffer pool, though not filled up,
3134 - * may still contain some buffers which would enable us
3135 - * to limp on.
3136 - */
3137 - netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
3138 - priv->dpbp_dev->obj_desc.id, priv->bpid);
3139 - }
3140 -
3141 /* We'll only start the txqs when the link is actually ready; make sure
3142 * we don't race against the link up notification, which may come
3143 * immediately after dpni_enable();
3144 */
3145 netif_tx_stop_all_queues(net_dev);
3146 - enable_ch_napi(priv);
3147 +
3148 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
3149 * return true and cause 'ip link show' to report the LOWER_UP flag,
3150 * even though the link notification wasn't even received.
3151 */
3152 netif_carrier_off(net_dev);
3153
3154 + err = seed_pool(priv, priv->bpid);
3155 + if (err) {
3156 + /* Not much to do; the buffer pool, though not filled up,
3157 + * may still contain some buffers which would enable us
3158 + * to limp on.
3159 + */
3160 + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
3161 + priv->dpbp_dev->obj_desc.id, priv->bpid);
3162 + }
3163 +
3164 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
3165 +
3166 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
3167 if (err < 0) {
3168 netdev_err(net_dev, "dpni_enable() failed\n");
3169 @@ -1047,51 +1355,20 @@ static int dpaa2_eth_open(struct net_dev
3170
3171 link_state_err:
3172 enable_err:
3173 - disable_ch_napi(priv);
3174 + priv->refill_thresh = 0;
3175 drain_pool(priv);
3176 return err;
3177 }
3178
3179 -/* The DPIO store must be empty when we call this,
3180 - * at the end of every NAPI cycle.
3181 - */
3182 -static u32 drain_channel(struct dpaa2_eth_priv *priv,
3183 - struct dpaa2_eth_channel *ch)
3184 +static int dpaa2_eth_stop(struct net_device *net_dev)
3185 {
3186 - u32 drained = 0, total = 0;
3187 -
3188 - do {
3189 - pull_channel(ch);
3190 - drained = consume_frames(ch);
3191 - total += drained;
3192 - } while (drained);
3193 -
3194 - return total;
3195 -}
3196 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3197 + int dpni_enabled = 0;
3198 + int retries = 10, i;
3199 + int err = 0;
3200
3201 -static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
3202 -{
3203 - struct dpaa2_eth_channel *ch;
3204 - int i;
3205 - u32 drained = 0;
3206 -
3207 - for (i = 0; i < priv->num_channels; i++) {
3208 - ch = priv->channel[i];
3209 - drained += drain_channel(priv, ch);
3210 - }
3211 -
3212 - return drained;
3213 -}
3214 -
3215 -static int dpaa2_eth_stop(struct net_device *net_dev)
3216 -{
3217 - struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3218 - int dpni_enabled;
3219 - int retries = 10;
3220 - u32 drained;
3221 -
3222 - netif_tx_stop_all_queues(net_dev);
3223 - netif_carrier_off(net_dev);
3224 + netif_tx_stop_all_queues(net_dev);
3225 + netif_carrier_off(net_dev);
3226
3227 /* Loop while dpni_disable() attempts to drain the egress FQs
3228 * and confirm them back to us.
3229 @@ -1105,56 +1382,24 @@ static int dpaa2_eth_stop(struct net_dev
3230 } while (dpni_enabled && --retries);
3231 if (!retries) {
3232 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
3233 - /* Must go on and disable NAPI nonetheless, so we don't crash at
3234 - * the next "ifconfig up"
3235 + /* Must go on and finish processing pending frames, so we don't
3236 + * crash at the next "ifconfig up"
3237 */
3238 + err = -ETIMEDOUT;
3239 }
3240
3241 - /* Wait for NAPI to complete on every core and disable it.
3242 - * In particular, this will also prevent NAPI from being rescheduled if
3243 - * a new CDAN is serviced, effectively discarding the CDAN. We therefore
3244 - * don't even need to disarm the channels, except perhaps for the case
3245 - * of a huge coalescing value.
3246 - */
3247 - disable_ch_napi(priv);
3248 + priv->refill_thresh = 0;
3249
3250 - /* Manually drain the Rx and TxConf queues */
3251 - drained = drain_ingress_frames(priv);
3252 - if (drained)
3253 - netdev_dbg(net_dev, "Drained %d frames.\n", drained);
3254 + /* Wait for all running napi poll routines to finish, so that no
3255 + * new refill operations are started
3256 + */
3257 + for (i = 0; i < priv->num_channels; i++)
3258 + napi_synchronize(&priv->channel[i]->napi);
3259
3260 /* Empty the buffer pool */
3261 drain_pool(priv);
3262
3263 - return 0;
3264 -}
3265 -
3266 -static int dpaa2_eth_init(struct net_device *net_dev)
3267 -{
3268 - u64 supported = 0;
3269 - u64 not_supported = 0;
3270 - struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3271 - u32 options = priv->dpni_attrs.options;
3272 -
3273 - /* Capabilities listing */
3274 - supported |= IFF_LIVE_ADDR_CHANGE;
3275 -
3276 - if (options & DPNI_OPT_NO_MAC_FILTER)
3277 - not_supported |= IFF_UNICAST_FLT;
3278 - else
3279 - supported |= IFF_UNICAST_FLT;
3280 -
3281 - net_dev->priv_flags |= supported;
3282 - net_dev->priv_flags &= ~not_supported;
3283 -
3284 - /* Features */
3285 - net_dev->features = NETIF_F_RXCSUM |
3286 - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3287 - NETIF_F_SG | NETIF_F_HIGHDMA |
3288 - NETIF_F_LLTX;
3289 - net_dev->hw_features = net_dev->features;
3290 -
3291 - return 0;
3292 + return err;
3293 }
3294
3295 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
3296 @@ -1200,25 +1445,6 @@ static void dpaa2_eth_get_stats(struct n
3297 }
3298 }
3299
3300 -static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
3301 -{
3302 - struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3303 - int err;
3304 -
3305 - /* Set the maximum Rx frame length to match the transmit side;
3306 - * account for L2 headers when computing the MFL
3307 - */
3308 - err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
3309 - (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
3310 - if (err) {
3311 - netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
3312 - return err;
3313 - }
3314 -
3315 - net_dev->mtu = mtu;
3316 - return 0;
3317 -}
3318 -
3319 /* Copy mac unicast addresses from @net_dev to @priv.
3320 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
3321 */
3322 @@ -1380,16 +1606,430 @@ static int dpaa2_eth_set_features(struct
3323 return 0;
3324 }
3325
3326 +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3327 +{
3328 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
3329 + struct hwtstamp_config config;
3330 +
3331 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
3332 + return -EFAULT;
3333 +
3334 + switch (config.tx_type) {
3335 + case HWTSTAMP_TX_OFF:
3336 + priv->ts_tx_en = false;
3337 + break;
3338 + case HWTSTAMP_TX_ON:
3339 + priv->ts_tx_en = true;
3340 + break;
3341 + default:
3342 + return -ERANGE;
3343 + }
3344 +
3345 + if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
3346 + priv->ts_rx_en = false;
3347 + } else {
3348 + priv->ts_rx_en = true;
3349 + /* TS is set for all frame types, not only those requested */
3350 + config.rx_filter = HWTSTAMP_FILTER_ALL;
3351 + }
3352 +
3353 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
3354 + -EFAULT : 0;
3355 +}
3356 +
3357 +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3358 +{
3359 + if (cmd == SIOCSHWTSTAMP)
3360 + return dpaa2_eth_ts_ioctl(dev, rq, cmd);
3361 +
3362 + return -EINVAL;
3363 +}
3364 +
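The handler above implements the standard SIOCSHWTSTAMP ABI, so generic user space works unmodified. A self-contained sketch of enabling hardware timestamps from user space ("eth0" is illustrative):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int sock = socket(AF_INET, SOCK_DGRAM, 0);

	if (sock < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* On return the driver may have rewritten cfg; this driver always
	 * reports rx_filter back as HWTSTAMP_FILTER_ALL
	 */
	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
		return 1;
	close(sock);
	return 0;
}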
3365 +static int set_buffer_layout(struct dpaa2_eth_priv *priv)
3366 +{
3367 + struct device *dev = priv->net_dev->dev.parent;
3368 + struct dpni_buffer_layout buf_layout = {0};
3369 + u16 rx_buf_align;
3370 + int err;
3371 +
3372 +	/* We need to check for WRIOP version 1.0.0, but depending on the MC
3373 +	 * version, rev1 hardware does not always report it correctly, so
3374 +	 * accept both alternatives below.
3375 + */
3376 + if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3377 + priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3378 + rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3379 + else
3380 + rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3381 +
3382 + /* tx buffer */
3383 + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3384 + buf_layout.pass_timestamp = true;
3385 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3386 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3387 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3388 + DPNI_QUEUE_TX, &buf_layout);
3389 + if (err) {
3390 + dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3391 + return err;
3392 + }
3393 +
3394 + /* tx-confirm buffer */
3395 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3396 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3397 + DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3398 + if (err) {
3399 + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3400 + return err;
3401 + }
3402 +
3403 + /* Now that we've set our tx buffer layout, retrieve the minimum
3404 + * required tx data offset.
3405 + */
3406 + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3407 + &priv->tx_data_offset);
3408 + if (err) {
3409 + dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3410 + return err;
3411 + }
3412 +
3413 + if ((priv->tx_data_offset % 64) != 0)
3414 + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3415 + priv->tx_data_offset);
3416 +
3417 + /* rx buffer */
3418 + buf_layout.pass_frame_status = true;
3419 + buf_layout.pass_parser_result = true;
3420 + buf_layout.data_align = rx_buf_align;
3421 + buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
3422 + buf_layout.private_data_size = 0;
3423 + /* If XDP program is attached, reserve extra space for
3424 + * potential header expansions
3425 + */
3426 + if (priv->has_xdp_prog)
3427 + buf_layout.data_head_room += XDP_PACKET_HEADROOM;
3428 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3429 + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3430 + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3431 + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3432 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3433 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3434 + DPNI_QUEUE_RX, &buf_layout);
3435 + if (err) {
3436 + dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3437 + return err;
3438 + }
3439 +
3440 + return 0;
3441 +}
3442 +
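+/* Frames can be enqueued either through the queuing destination (QDID)
+ * or, on firmware with DPNI API 7.9 or newer, directly to a frame
+ * queue (FQID). Choose the enqueue callback based on the detected
+ * version.
+ */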
3443 +#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
3444 +#define DPNI_ENQUEUE_FQID_VER_MINOR 9
3445 +
3446 +static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3447 + struct dpaa2_eth_fq *fq,
3448 + struct dpaa2_fd *fd, u8 prio)
3449 +{
3450 + return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3451 + priv->tx_qdid, prio,
3452 + fq->tx_qdbin, fd);
3453 +}
3454 +
3455 +static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
3456 + struct dpaa2_eth_fq *fq,
3457 + struct dpaa2_fd *fd,
3458 + u8 prio __always_unused)
3459 +{
3460 + return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
3461 + fq->tx_fqid, fd);
3462 +}
3463 +
3464 +static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
3465 +{
3466 + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3467 + DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3468 + priv->enqueue = dpaa2_eth_enqueue_qd;
3469 + else
3470 + priv->enqueue = dpaa2_eth_enqueue_fq;
3471 +}
3472 +
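+/* Refresh the cached Tx FQIDs; if any of them cannot be read, fall
+ * back to QDID-based enqueue.
+ */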
3473 +static void update_tx_fqids(struct dpaa2_eth_priv *priv)
3474 +{
3475 + struct dpaa2_eth_fq *fq;
3476 + struct dpni_queue queue;
3477 + struct dpni_queue_id qid = {0};
3478 + int i, err;
3479 +
3480 + /* We only use Tx FQIDs for FQID-based enqueue, so check
3481 + * if DPNI version supports it before updating FQIDs
3482 + */
3483 + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3484 + DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3485 + return;
3486 +
3487 + for (i = 0; i < priv->num_fqs; i++) {
3488 + fq = &priv->fq[i];
3489 + if (fq->type != DPAA2_TX_CONF_FQ)
3490 + continue;
3491 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3492 + DPNI_QUEUE_TX, 0, fq->flowid,
3493 + &queue, &qid);
3494 + if (err)
3495 + goto out_err;
3496 +
3497 + fq->tx_fqid = qid.fqid;
3498 + if (fq->tx_fqid == 0)
3499 + goto out_err;
3500 + }
3501 +
3502 + return;
3503 +
3504 +out_err:
3505 + netdev_info(priv->net_dev,
3506 +		    "Error reading Tx FQID, falling back to QDID-based enqueue\n");
3507 + priv->enqueue = dpaa2_eth_enqueue_qd;
3508 +}
3509 +
3510 +static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
3511 +{
3512 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3513 + struct dpaa2_eth_channel *ch;
3514 + struct bpf_prog *old_prog = NULL;
3515 + int i, err;
3516 +
3517 +	/* No support for SG frames: the MTU must fit in a single Rx buffer */
3518 + if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE)
3519 + return -EINVAL;
3520 +
3521 + if (netif_running(net_dev)) {
3522 + err = dpaa2_eth_stop(net_dev);
3523 + if (err)
3524 + return err;
3525 + }
3526 +
3527 + if (prog) {
3528 + prog = bpf_prog_add(prog, priv->num_channels - 1);
3529 + if (IS_ERR(prog))
3530 + return PTR_ERR(prog);
3531 + }
3532 +
3533 + priv->has_xdp_prog = !!prog;
3534 +
3535 + for (i = 0; i < priv->num_channels; i++) {
3536 + ch = priv->channel[i];
3537 + old_prog = xchg(&ch->xdp_prog, prog);
3538 + if (old_prog)
3539 + bpf_prog_put(old_prog);
3540 + }
3541 +
3542 +	/* When turning XDP on/off we need to reconfigure the Rx buffer
3543 +	 * layout. The buffer pool was drained in dpaa2_eth_stop(), so we are
3544 +	 * sure no buffers with the old layout are still in use.
3545 + */
3546 + if (priv->has_xdp_prog != !!old_prog)
3547 + set_buffer_layout(priv);
3548 +
3549 + if (netif_running(net_dev)) {
3550 + err = dpaa2_eth_open(net_dev);
3551 + if (err)
3552 + return err;
3553 + }
3554 +
3555 + return 0;
3556 +}
3557 +
3558 +static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp)
3559 +{
3560 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
3561 +
3562 + switch (xdp->command) {
3563 + case XDP_SETUP_PROG:
3564 + return dpaa2_eth_set_xdp(dev, xdp->prog);
3565 + case XDP_QUERY_PROG:
3566 + xdp->prog_attached = priv->has_xdp_prog;
3567 + return 0;
3568 + default:
3569 + return -EINVAL;
3570 + }
3571 +}
3572 +
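+/* ndo_xdp_xmit: build a single-buffer frame descriptor around the
+ * redirected XDP buffer and enqueue it on the Tx queue affine to the
+ * current CPU.
+ */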
3573 +static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, struct xdp_buff *xdp)
3574 +{
3575 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3576 + struct device *dev = net_dev->dev.parent;
3577 + struct rtnl_link_stats64 *percpu_stats;
3578 + struct dpaa2_eth_drv_stats *percpu_extras;
3579 + unsigned int needed_headroom;
3580 + struct dpaa2_eth_swa *swa;
3581 + struct dpaa2_eth_fq *fq;
3582 + struct dpaa2_fd fd;
3583 + void *buffer_start, *aligned_start;
3584 + dma_addr_t addr;
3585 + int err, i;
3586 +
3587 + if (!netif_running(net_dev))
3588 + return -ENETDOWN;
3589 +
3590 +	percpu_stats = this_cpu_ptr(priv->percpu_stats);
3591 +	percpu_extras = this_cpu_ptr(priv->percpu_extras);
3592 +
3593 +	/* We require a minimum headroom to be able to transmit the frame.
3594 +	 * Otherwise return an error and let the original net_device handle it
3595 +	 */
3596 +	/* TODO: Do we update i/f counters here or just on the Rx device? */
3597 +	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
3598 +	if (xdp->data < xdp->data_hard_start ||
3599 +	    xdp->data - xdp->data_hard_start < needed_headroom) {
3600 +		percpu_stats->tx_dropped++;
3601 +		return -EINVAL;
3602 +	}
3603 +
3604 + /* Setup the FD fields */
3605 + memset(&fd, 0, sizeof(fd));
3606 +
3607 + /* Align FD address, if possible */
3608 + buffer_start = xdp->data - needed_headroom;
3609 + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
3610 + DPAA2_ETH_TX_BUF_ALIGN);
3611 + if (aligned_start >= xdp->data_hard_start)
3612 + buffer_start = aligned_start;
3613 +
3614 + swa = (struct dpaa2_eth_swa *)buffer_start;
3615 +	/* Fill in the software annotation fields needed on the Tx conf path */
3616 + swa->type = DPAA2_ETH_SWA_XDP;
3617 + swa->xdp.dma_size = xdp->data_end - buffer_start;
3618 +
3619 + addr = dma_map_single(dev, buffer_start,
3620 + xdp->data_end - buffer_start,
3621 + DMA_BIDIRECTIONAL);
3622 + if (unlikely(dma_mapping_error(dev, addr))) {
3623 + percpu_stats->tx_dropped++;
3624 + return -ENOMEM;
3625 + }
3626 +
3627 + dpaa2_fd_set_addr(&fd, addr);
3628 + dpaa2_fd_set_offset(&fd, xdp->data - buffer_start);
3629 + dpaa2_fd_set_len(&fd, xdp->data_end - xdp->data);
3630 + dpaa2_fd_set_format(&fd, dpaa2_fd_single);
3631 + dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
3632 +
3633 + fq = &priv->fq[smp_processor_id()];
3634 + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
3635 + err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
3636 + fq->tx_qdbin, &fd);
3637 + if (err != -EBUSY)
3638 + break;
3639 + }
3640 + percpu_extras->tx_portal_busy += i;
3641 + if (unlikely(err < 0)) {
3642 + percpu_stats->tx_errors++;
3643 + /* let the Rx device handle the cleanup */
3644 + return err;
3645 + }
3646 +
3647 + percpu_stats->tx_packets++;
3648 + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
3649 +
3650 + return 0;
3651 +}
3652 +
3653 +static void dpaa2_eth_xdp_flush(struct net_device *net_dev)
3654 +{
3655 + /* We don't have hardware support for Tx batching,
3656 + * so we do the actual frame enqueue in ndo_xdp_xmit
3657 + */
3658 +}
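+
+/* Set Transmit Packet Steering so that each Tx queue is used by the
+ * same CPU that processes its Tx confirmation frames.
+ */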
3659 +static int dpaa2_eth_update_xps(struct dpaa2_eth_priv *priv)
3660 +{
3661 + struct net_device *net_dev = priv->net_dev;
3662 + unsigned int i, num_queues;
3663 + struct cpumask xps_mask;
3664 + struct dpaa2_eth_fq *fq;
3665 + int err = 0;
3666 +
3667 + num_queues = (net_dev->num_tc ? : 1) * dpaa2_eth_queue_count(priv);
3668 + for (i = 0; i < num_queues; i++) {
3669 + fq = &priv->fq[i % dpaa2_eth_queue_count(priv)];
3670 + cpumask_clear(&xps_mask);
3671 + cpumask_set_cpu(fq->target_cpu, &xps_mask);
3672 + err = netif_set_xps_queue(net_dev, &xps_mask, i);
3673 + if (err) {
3674 + dev_info_once(net_dev->dev.parent,
3675 + "Error setting XPS queue\n");
3676 + break;
3677 + }
3678 + }
3679 +
3680 + return err;
3681 +}
3682 +
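+/* mqprio offload: expose each hardware traffic class as a contiguous
+ * range of netdev Tx queues, then refresh the XPS mapping.
+ */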
3683 +static int dpaa2_eth_setup_tc(struct net_device *net_dev,
3684 + enum tc_setup_type type,
3685 + void *type_data)
3686 +{
3687 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3688 + struct tc_mqprio_qopt *mqprio = (struct tc_mqprio_qopt *)type_data;
3689 + int i, err = 0;
3690 +
3691 + if (type != TC_SETUP_MQPRIO)
3692 + return -EINVAL;
3693 +
3694 + if (mqprio->num_tc > dpaa2_eth_tc_count(priv)) {
3695 + netdev_err(net_dev, "Max %d traffic classes supported\n",
3696 + dpaa2_eth_tc_count(priv));
3697 + return -EINVAL;
3698 + }
3699 +
3700 + if (mqprio->num_tc == net_dev->num_tc)
3701 + return 0;
3702 +
3703 + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3704 +
3705 + if (!mqprio->num_tc) {
3706 + netdev_reset_tc(net_dev);
3707 + err = netif_set_real_num_tx_queues(net_dev,
3708 + dpaa2_eth_queue_count(priv));
3709 + if (err)
3710 + return err;
3711 +
3712 + goto update_xps;
3713 + }
3714 +
3715 + err = netdev_set_num_tc(net_dev, mqprio->num_tc);
3716 + if (err)
3717 + return err;
3718 +
3719 + err = netif_set_real_num_tx_queues(net_dev, mqprio->num_tc *
3720 + dpaa2_eth_queue_count(priv));
3721 + if (err)
3722 + return err;
3723 +
3724 + for (i = 0; i < mqprio->num_tc; i++) {
3725 + err = netdev_set_tc_queue(net_dev, i,
3726 + dpaa2_eth_queue_count(priv),
3727 + i * dpaa2_eth_queue_count(priv));
3728 + if (err)
3729 + return err;
3730 + }
3731 +
3732 +update_xps:
3733 + err = dpaa2_eth_update_xps(priv);
3734 + return err;
3735 +}
3736 +
3737 static const struct net_device_ops dpaa2_eth_ops = {
3738 .ndo_open = dpaa2_eth_open,
3739 .ndo_start_xmit = dpaa2_eth_tx,
3740 .ndo_stop = dpaa2_eth_stop,
3741 - .ndo_init = dpaa2_eth_init,
3742 .ndo_set_mac_address = dpaa2_eth_set_addr,
3743 .ndo_get_stats64 = dpaa2_eth_get_stats,
3744 - .ndo_change_mtu = dpaa2_eth_change_mtu,
3745 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
3746 .ndo_set_features = dpaa2_eth_set_features,
3747 + .ndo_do_ioctl = dpaa2_eth_ioctl,
3748 + .ndo_xdp = dpaa2_eth_xdp,
3749 + .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
3750 + .ndo_xdp_flush = dpaa2_eth_xdp_flush,
3751 + .ndo_setup_tc = dpaa2_eth_setup_tc,
3752 };
3753
3754 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
3755 @@ -1422,34 +2062,32 @@ static struct fsl_mc_device *setup_dpcon
3756 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
3757 if (err) {
3758 dev_err(dev, "dpcon_open() failed\n");
3759 - goto err_open;
3760 + goto free;
3761 }
3762
3763 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
3764 if (err) {
3765 dev_err(dev, "dpcon_reset() failed\n");
3766 - goto err_reset;
3767 + goto close;
3768 }
3769
3770 err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
3771 if (err) {
3772 dev_err(dev, "dpcon_get_attributes() failed\n");
3773 - goto err_get_attr;
3774 + goto close;
3775 }
3776
3777 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
3778 if (err) {
3779 dev_err(dev, "dpcon_enable() failed\n");
3780 - goto err_enable;
3781 + goto close;
3782 }
3783
3784 return dpcon;
3785
3786 -err_enable:
3787 -err_get_attr:
3788 -err_reset:
3789 +close:
3790 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3791 -err_open:
3792 +free:
3793 fsl_mc_object_free(dpcon);
3794
3795 return NULL;
3796 @@ -1502,7 +2140,14 @@ err_setup:
3797 static void free_channel(struct dpaa2_eth_priv *priv,
3798 struct dpaa2_eth_channel *channel)
3799 {
3800 + struct bpf_prog *prog;
3801 +
3802 free_dpcon(priv, channel->dpcon);
3803 +
3804 + prog = READ_ONCE(channel->xdp_prog);
3805 + if (prog)
3806 + bpf_prog_put(prog);
3807 +
3808 kfree(channel);
3809 }
3810
3811 @@ -1546,7 +2191,8 @@ static int setup_dpio(struct dpaa2_eth_p
3812 nctx->desired_cpu = i;
3813
3814 /* Register the new context */
3815 - err = dpaa2_io_service_register(NULL, nctx);
3816 + channel->dpio = dpaa2_io_service_select(i);
3817 + err = dpaa2_io_service_register(channel->dpio, nctx, dev);
3818 if (err) {
3819 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
3820 /* If no affine DPIO for this core, there's probably
3821 @@ -1579,14 +2225,14 @@ static int setup_dpio(struct dpaa2_eth_p
3822 /* Stop if we already have enough channels to accommodate all
3823 * RX and TX conf queues
3824 */
3825 - if (priv->num_channels == dpaa2_eth_queue_count(priv))
3826 + if (priv->num_channels == priv->dpni_attrs.num_queues)
3827 break;
3828 }
3829
3830 return 0;
3831
3832 err_set_cdan:
3833 - dpaa2_io_service_deregister(NULL, nctx);
3834 + dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3835 err_service_reg:
3836 free_channel(priv, channel);
3837 err_alloc_ch:
3838 @@ -1603,13 +2249,14 @@ err_alloc_ch:
3839
3840 static void free_dpio(struct dpaa2_eth_priv *priv)
3841 {
3842 - int i;
3843 + struct device *dev = priv->net_dev->dev.parent;
3844 struct dpaa2_eth_channel *ch;
3845 + int i;
3846
3847 /* deregister CDAN notifications and free channels */
3848 for (i = 0; i < priv->num_channels; i++) {
3849 ch = priv->channel[i];
3850 - dpaa2_io_service_deregister(NULL, &ch->nctx);
3851 + dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3852 free_channel(priv, ch);
3853 }
3854 }
3855 @@ -1636,8 +2283,7 @@ static void set_fq_affinity(struct dpaa2
3856 {
3857 struct device *dev = priv->net_dev->dev.parent;
3858 struct dpaa2_eth_fq *fq;
3859 - int rx_cpu, txc_cpu;
3860 - int i;
3861 + int rx_cpu, txc_cpu, i;
3862
3863 /* For each FQ, pick one channel/CPU to deliver frames to.
3864 * This may well change at runtime, either through irqbalance or
3865 @@ -1649,6 +2295,7 @@ static void set_fq_affinity(struct dpaa2
3866 fq = &priv->fq[i];
3867 switch (fq->type) {
3868 case DPAA2_RX_FQ:
3869 + case DPAA2_RX_ERR_FQ:
3870 fq->target_cpu = rx_cpu;
3871 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3872 if (rx_cpu >= nr_cpu_ids)
3873 @@ -1665,11 +2312,13 @@ static void set_fq_affinity(struct dpaa2
3874 }
3875 fq->channel = get_affine_channel(priv, fq->target_cpu);
3876 }
3877 +
3878 + dpaa2_eth_update_xps(priv);
3879 }
3880
3881 static void setup_fqs(struct dpaa2_eth_priv *priv)
3882 {
3883 - int i;
3884 + int i, j;
3885
3886 /* We have one TxConf FQ per Tx flow.
3887 * The number of Tx and Rx queues is the same.
3888 @@ -1681,11 +2330,19 @@ static void setup_fqs(struct dpaa2_eth_p
3889 priv->fq[priv->num_fqs++].flowid = (u16)i;
3890 }
3891
3892 - for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3893 - priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3894 - priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3895 - priv->fq[priv->num_fqs++].flowid = (u16)i;
3896 - }
3897 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
3898 + for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
3899 + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3900 + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3901 + priv->fq[priv->num_fqs].tc = (u8)i;
3902 + priv->fq[priv->num_fqs++].flowid = (u16)j;
3903 + }
3904 +
3905 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3906 + /* We have exactly one Rx error queue per DPNI */
3907 + priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3908 + priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3909 +#endif
3910
3911 /* For each FQ, decide on which core to process incoming frames */
3912 set_fq_affinity(priv);
3913 @@ -1735,6 +2392,9 @@ static int setup_dpbp(struct dpaa2_eth_p
3914 }
3915 priv->bpid = dpbp_attrs.bpid;
3916
3917 + /* By default we start with flow control enabled */
3918 + priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
3919 +
3920 return 0;
3921
3922 err_get_attr:
3923 @@ -1762,7 +2422,7 @@ static int setup_dpni(struct fsl_mc_devi
3924 struct device *dev = &ls_dev->dev;
3925 struct dpaa2_eth_priv *priv;
3926 struct net_device *net_dev;
3927 - struct dpni_buffer_layout buf_layout = {0};
3928 + struct dpni_link_cfg cfg = {0};
3929 int err;
3930
3931 net_dev = dev_get_drvdata(dev);
3932 @@ -1772,7 +2432,22 @@ static int setup_dpni(struct fsl_mc_devi
3933 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3934 if (err) {
3935 dev_err(dev, "dpni_open() failed\n");
3936 - goto err_open;
3937 + return err;
3938 + }
3939 +
3940 + /* Check if we can work with this DPNI object */
3941 + err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3942 + &priv->dpni_ver_minor);
3943 + if (err) {
3944 + dev_err(dev, "dpni_get_api_version() failed\n");
3945 + goto close;
3946 + }
3947 + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3948 + dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3949 + priv->dpni_ver_major, priv->dpni_ver_minor,
3950 + DPNI_VER_MAJOR, DPNI_VER_MINOR);
3951 + err = -ENOTSUPP;
3952 + goto close;
3953 }
3954
3955 ls_dev->mc_io = priv->mc_io;
3956 @@ -1781,77 +2456,41 @@ static int setup_dpni(struct fsl_mc_devi
3957 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3958 if (err) {
3959 dev_err(dev, "dpni_reset() failed\n");
3960 - goto err_reset;
3961 + goto close;
3962 }
3963
3964 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3965 &priv->dpni_attrs);
3966 if (err) {
3967 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3968 - goto err_get_attr;
3969 + goto close;
3970 }
3971
3972 - /* Configure buffer layouts */
3973 - /* rx buffer */
3974 - buf_layout.pass_parser_result = true;
3975 - buf_layout.pass_frame_status = true;
3976 - buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3977 - buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
3978 - buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3979 - DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3980 - DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3981 - DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
3982 - err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3983 - DPNI_QUEUE_RX, &buf_layout);
3984 - if (err) {
3985 - dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3986 - goto err_buf_layout;
3987 - }
3988 + err = set_buffer_layout(priv);
3989 + if (err)
3990 + goto close;
3991
3992 - /* tx buffer */
3993 - buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3994 - DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
3995 - err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3996 - DPNI_QUEUE_TX, &buf_layout);
3997 - if (err) {
3998 - dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3999 - goto err_buf_layout;
4000 - }
4001 + set_enqueue_mode(priv);
4002
4003 - /* tx-confirm buffer */
4004 - buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
4005 - err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
4006 - DPNI_QUEUE_TX_CONFIRM, &buf_layout);
4007 - if (err) {
4008 - dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
4009 - goto err_buf_layout;
4010 - }
4011 + priv->cls_rule = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
4012 + dpaa2_eth_fs_count(priv), GFP_KERNEL);
4013 +	if (!priv->cls_rule) {
+		err = -ENOMEM;
4014 +		goto close;
+	}
4015
4016 - /* Now that we've set our tx buffer layout, retrieve the minimum
4017 - * required tx data offset.
4018 - */
4019 - err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
4020 - &priv->tx_data_offset);
4021 + /* Enable flow control */
4022 + cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
4023 + priv->tx_pause_frames = true;
4024 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
4025 if (err) {
4026 - dev_err(dev, "dpni_get_tx_data_offset() failed\n");
4027 - goto err_data_offset;
4028 + dev_err(dev, "dpni_set_link_cfg() failed\n");
4029 + goto close;
4030 }
4031
4032 - if ((priv->tx_data_offset % 64) != 0)
4033 - dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
4034 - priv->tx_data_offset);
4035 -
4036 - /* Accommodate software annotation space (SWA) */
4037 - priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
4038 -
4039 return 0;
4040
4041 -err_data_offset:
4042 -err_buf_layout:
4043 -err_get_attr:
4044 -err_reset:
4045 +close:
4046 dpni_close(priv->mc_io, 0, priv->mc_token);
4047 -err_open:
4048 +
4049 return err;
4050 }
4051
4052 @@ -1865,6 +2504,7 @@ static void free_dpni(struct dpaa2_eth_p
4053 err);
4054
4055 dpni_close(priv->mc_io, 0, priv->mc_token);
4056 +
4057 }
4058
4059 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
4060 @@ -1873,11 +2513,10 @@ static int setup_rx_flow(struct dpaa2_et
4061 struct device *dev = priv->net_dev->dev.parent;
4062 struct dpni_queue queue;
4063 struct dpni_queue_id qid;
4064 - struct dpni_taildrop td;
4065 int err;
4066
4067 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
4068 - DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
4069 + DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
4070 if (err) {
4071 dev_err(dev, "dpni_get_queue(RX) failed\n");
4072 return err;
4073 @@ -1889,24 +2528,136 @@ static int setup_rx_flow(struct dpaa2_et
4074 queue.destination.type = DPNI_DEST_DPCON;
4075 queue.destination.priority = 1;
4076 queue.user_context = (u64)(uintptr_t)fq;
4077 + queue.flc.stash_control = 1;
4078 + queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
4079 +	/* 01 01 00 - stash one cache line each of frame data and
+	 * annotation, no flow context
+	 */
4080 + queue.flc.value |= 0x14;
4081 +
4082 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
4083 - DPNI_QUEUE_RX, 0, fq->flowid,
4084 - DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
4085 + DPNI_QUEUE_RX, fq->tc, fq->flowid,
4086 + DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
4087 + DPNI_QUEUE_OPT_FLC,
4088 &queue);
4089 if (err) {
4090 dev_err(dev, "dpni_set_queue(RX) failed\n");
4091 return err;
4092 }
4093
4094 - td.enable = 1;
4095 - td.threshold = DPAA2_ETH_TAILDROP_THRESH;
4096 - err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
4097 - DPNI_QUEUE_RX, 0, fq->flowid, &td);
4098 - if (err) {
4099 - dev_err(dev, "dpni_set_threshold() failed\n");
4100 - return err;
4101 + return 0;
4102 +}
4103 +
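+/* Apply the given taildrop configuration on all Rx frame queues */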
4104 +static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
4105 + struct dpni_taildrop *td)
4106 +{
4107 + struct device *dev = priv->net_dev->dev.parent;
4108 + int i, err;
4109 +
4110 + for (i = 0; i < priv->num_fqs; i++) {
4111 + if (priv->fq[i].type != DPAA2_RX_FQ)
4112 + continue;
4113 +
4114 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
4115 + DPNI_CP_QUEUE, DPNI_QUEUE_RX,
4116 + priv->fq[i].tc, priv->fq[i].flowid,
4117 + td);
4118 + if (err) {
4119 + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
4120 + return err;
4121 + }
4122 +
4123 + dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n",
4124 + (td->enable ? "Enabled" : "Disabled"),
4125 + priv->fq[i].flowid, priv->fq[i].tc);
4126 + }
4127 +
4128 + return 0;
4129 +}
4130 +
4131 +static int set_group_taildrop(struct dpaa2_eth_priv *priv,
4132 + struct dpni_taildrop *td)
4133 +{
4134 + struct device *dev = priv->net_dev->dev.parent;
4135 + struct dpni_taildrop disable_td, *tc_td;
4136 + int i, err;
4137 +
4138 + memset(&disable_td, 0, sizeof(struct dpni_taildrop));
4139 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4140 + if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
4141 + /* Do not set taildrop thresholds for PFC-enabled
4142 + * traffic classes. We will enable congestion
4143 + * notifications for them.
4144 + */
4145 + tc_td = &disable_td;
4146 + else
4147 + tc_td = td;
4148 +
4149 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
4150 + DPNI_CP_GROUP, DPNI_QUEUE_RX,
4151 + i, 0, tc_td);
4152 + if (err) {
4153 + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
4154 + return err;
4155 + }
4156 +
4157 + dev_dbg(dev, "%s taildrop for Rx group tc %d\n",
4158 + (tc_td->enable ? "Enabled" : "Disabled"),
4159 + i);
4160 + }
4161 +
4162 + return 0;
4163 +}
4164 +
4165 +/* Enable/disable Rx FQ taildrop
4166 + *
4167 + * Rx FQ taildrop is mutually exclusive with flow control: it gets
4168 + * disabled whenever FC is active. Since the FC state also dictates the
4169 + * maximum number of buffers in the pool, take the opportunity to
4170 + * update that value as well.
4171 + */
4172 +int set_rx_taildrop(struct dpaa2_eth_priv *priv)
4173 +{
4174 + enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
4175 + struct dpni_taildrop td_queue, td_group;
4176 + int err = 0;
4177 +
4178 + switch (cfg) {
4179 + case DPAA2_ETH_TD_NONE:
4180 + memset(&td_queue, 0, sizeof(struct dpni_taildrop));
4181 + memset(&td_group, 0, sizeof(struct dpni_taildrop));
4182 + priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC /
4183 + priv->num_channels;
4184 + break;
4185 + case DPAA2_ETH_TD_QUEUE:
4186 + memset(&td_group, 0, sizeof(struct dpni_taildrop));
4187 + td_queue.enable = 1;
4188 + td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
4189 + td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
4190 + dpaa2_eth_tc_count(priv);
4191 + priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH;
4192 + break;
4193 + case DPAA2_ETH_TD_GROUP:
4194 + memset(&td_queue, 0, sizeof(struct dpni_taildrop));
4195 + td_group.enable = 1;
4196 + td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
4197 + td_group.threshold = NAPI_POLL_WEIGHT *
4198 + dpaa2_eth_queue_count(priv);
4199 + priv->max_bufs_per_ch = NAPI_POLL_WEIGHT *
4200 + dpaa2_eth_tc_count(priv);
4201 + break;
4202 + default:
4203 + break;
4204 }
4205
4206 + err = set_queue_taildrop(priv, &td_queue);
4207 + if (err)
4208 + return err;
4209 +
4210 + err = set_group_taildrop(priv, &td_group);
4211 + if (err)
4212 + return err;
4213 +
4214 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
4215 +
4216 return 0;
4217 }
4218
4219 @@ -1926,6 +2677,7 @@ static int setup_tx_flow(struct dpaa2_et
4220 }
4221
4222 fq->tx_qdbin = qid.qdbin;
4223 + fq->tx_fqid = qid.fqid;
4224
4225 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
4226 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
4227 @@ -1953,23 +2705,88 @@ static int setup_tx_flow(struct dpaa2_et
4228 return 0;
4229 }
4230
4231 -/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
4232 -static const struct dpaa2_eth_hash_fields hash_fields[] = {
4233 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
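+/* Route the Rx error queue to a channel so that error frames are
+ * delivered to the dpaa2_eth_rx_err consume callback.
+ */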
4234 +static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
4235 + struct dpaa2_eth_fq *fq)
4236 +{
4237 + struct device *dev = priv->net_dev->dev.parent;
4238 + struct dpni_queue q = { { 0 } };
4239 + struct dpni_queue_id qid;
4240 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
4241 + int err;
4242 +
4243 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
4244 + DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
4245 + if (err) {
4246 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
4247 + return err;
4248 + }
4249 +
4250 + fq->fqid = qid.fqid;
4251 +
4252 + q.destination.id = fq->channel->dpcon_id;
4253 + q.destination.type = DPNI_DEST_DPCON;
4254 + q.destination.priority = 1;
4255 +	q.user_context = (u64)(uintptr_t)fq;
4256 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
4257 + DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
4258 + if (err) {
4259 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
4260 + return err;
4261 + }
4262 +
4263 + return 0;
4264 +}
4265 +#endif
4266 +
4267 +/* Supported header fields for Rx hash distribution key */
4268 +static const struct dpaa2_eth_dist_fields dist_fields[] = {
4269 {
4270 + /* L2 header */
4271 + .rxnfc_field = RXH_L2DA,
4272 + .cls_prot = NET_PROT_ETH,
4273 + .cls_field = NH_FLD_ETH_DA,
4274 + .id = DPAA2_ETH_DIST_ETHDST,
4275 + .size = 6,
4276 + }, {
4277 + .cls_prot = NET_PROT_ETH,
4278 + .cls_field = NH_FLD_ETH_SA,
4279 + .id = DPAA2_ETH_DIST_ETHSRC,
4280 + .size = 6,
4281 + }, {
4282 + /* This is the last ethertype field parsed:
4283 + * depending on frame format, it can be the MAC ethertype
4284 + * or the VLAN etype.
4285 + */
4286 + .cls_prot = NET_PROT_ETH,
4287 + .cls_field = NH_FLD_ETH_TYPE,
4288 + .id = DPAA2_ETH_DIST_ETHTYPE,
4289 + .size = 2,
4290 + }, {
4291 + /* VLAN header */
4292 + .rxnfc_field = RXH_VLAN,
4293 + .cls_prot = NET_PROT_VLAN,
4294 + .cls_field = NH_FLD_VLAN_TCI,
4295 + .id = DPAA2_ETH_DIST_VLAN,
4296 + .size = 2,
4297 + }, {
4298 /* IP header */
4299 .rxnfc_field = RXH_IP_SRC,
4300 .cls_prot = NET_PROT_IP,
4301 .cls_field = NH_FLD_IP_SRC,
4302 + .id = DPAA2_ETH_DIST_IPSRC,
4303 .size = 4,
4304 }, {
4305 .rxnfc_field = RXH_IP_DST,
4306 .cls_prot = NET_PROT_IP,
4307 .cls_field = NH_FLD_IP_DST,
4308 + .id = DPAA2_ETH_DIST_IPDST,
4309 .size = 4,
4310 }, {
4311 .rxnfc_field = RXH_L3_PROTO,
4312 .cls_prot = NET_PROT_IP,
4313 .cls_field = NH_FLD_IP_PROTO,
4314 + .id = DPAA2_ETH_DIST_IPPROTO,
4315 .size = 1,
4316 }, {
4317 /* Using UDP ports, this is functionally equivalent to raw
4318 @@ -1978,41 +2795,170 @@ static const struct dpaa2_eth_hash_field
4319 .rxnfc_field = RXH_L4_B_0_1,
4320 .cls_prot = NET_PROT_UDP,
4321 .cls_field = NH_FLD_UDP_PORT_SRC,
4322 + .id = DPAA2_ETH_DIST_L4SRC,
4323 .size = 2,
4324 }, {
4325 .rxnfc_field = RXH_L4_B_2_3,
4326 .cls_prot = NET_PROT_UDP,
4327 .cls_field = NH_FLD_UDP_PORT_DST,
4328 + .id = DPAA2_ETH_DIST_L4DST,
4329 .size = 2,
4330 },
4331 };
4332
4333 -/* Set RX hash options
4334 +/* Configure the Rx hash key using the legacy API */
4335 +static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4336 +{
4337 + struct device *dev = priv->net_dev->dev.parent;
4338 + struct dpni_rx_tc_dist_cfg dist_cfg;
4339 + int i, err = 0;
4340 +
4341 + memset(&dist_cfg, 0, sizeof(dist_cfg));
4342 +
4343 + dist_cfg.key_cfg_iova = key;
4344 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4345 + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
4346 +
4347 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4348 + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
4349 + i, &dist_cfg);
4350 + if (err) {
4351 + dev_err(dev, "dpni_set_rx_tc_dist failed\n");
4352 + break;
4353 + }
4354 + }
4355 +
4356 + return err;
4357 +}
4358 +
4359 +/* Configure the Rx hash key using the new API */
4360 +static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4361 +{
4362 + struct device *dev = priv->net_dev->dev.parent;
4363 + struct dpni_rx_dist_cfg dist_cfg;
4364 + int i, err = 0;
4365 +
4366 + memset(&dist_cfg, 0, sizeof(dist_cfg));
4367 +
4368 + dist_cfg.key_cfg_iova = key;
4369 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4370 + dist_cfg.enable = 1;
4371 +
4372 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4373 + dist_cfg.tc = i;
4374 + err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
4375 + &dist_cfg);
4376 + if (err) {
4377 + dev_err(dev, "dpni_set_rx_hash_dist failed\n");
4378 + break;
4379 + }
4380 + }
4381 +
4382 + return err;
4383 +}
4384 +
4385 +/* Configure the Rx flow classification key */
4386 +static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4387 +{
4388 + struct device *dev = priv->net_dev->dev.parent;
4389 + struct dpni_rx_dist_cfg dist_cfg;
4390 + int i, err = 0;
4391 +
4392 + memset(&dist_cfg, 0, sizeof(dist_cfg));
4393 +
4394 + dist_cfg.key_cfg_iova = key;
4395 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4396 + dist_cfg.enable = 1;
4397 +
4398 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4399 + dist_cfg.tc = i;
4400 + err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
4401 + &dist_cfg);
4402 + if (err) {
4403 + dev_err(dev, "dpni_set_rx_fs_dist failed\n");
4404 + break;
4405 + }
4406 + }
4407 +
4408 + return err;
4409 +}
4410 +
4411 +/* Size of the Rx flow classification key */
4412 +int dpaa2_eth_cls_key_size(u64 fields)
4413 +{
4414 + int i, size = 0;
4415 +
4416 + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4417 + if (!(fields & dist_fields[i].id))
4418 + continue;
4419 + size += dist_fields[i].size;
4420 + }
4421 +
4422 + return size;
4423 +}
4424 +
4425 +/* Offset of header field in Rx classification key */
4426 +int dpaa2_eth_cls_fld_off(int prot, int field)
4427 +{
4428 + int i, off = 0;
4429 +
4430 + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4431 + if (dist_fields[i].cls_prot == prot &&
4432 + dist_fields[i].cls_field == field)
4433 + return off;
4434 + off += dist_fields[i].size;
4435 + }
4436 +
4437 + WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
4438 + return 0;
4439 +}
4440 +
4441 +/* Prune unused fields from the classification rule.
4442 + * Used when masking is not supported
4443 + */
4444 +void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
4445 +{
4446 + int off = 0, new_off = 0;
4447 + int i, size;
4448 +
4449 + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4450 + size = dist_fields[i].size;
4451 + if (dist_fields[i].id & fields) {
4452 + memcpy(key_mem + new_off, key_mem + off, size);
4453 + new_off += size;
4454 + }
4455 + off += size;
4456 + }
4457 +}
4458 +
4459 +/* Set Rx distribution (hash or flow classification) key
4460 * flags is a combination of RXH_ bits
4461 */
4462 -static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4463 +static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
4464 + enum dpaa2_eth_rx_dist type, u64 flags)
4465 {
4466 struct device *dev = net_dev->dev.parent;
4467 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4468 struct dpkg_profile_cfg cls_cfg;
4469 - struct dpni_rx_tc_dist_cfg dist_cfg;
4470 + u32 rx_hash_fields = 0;
4471 + dma_addr_t key_iova;
4472 u8 *dma_mem;
4473 int i;
4474 int err = 0;
4475
4476 - if (!dpaa2_eth_hash_enabled(priv)) {
4477 - dev_dbg(dev, "Hashing support is not enabled\n");
4478 - return 0;
4479 - }
4480 -
4481 memset(&cls_cfg, 0, sizeof(cls_cfg));
4482
4483 - for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
4484 + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4485 struct dpkg_extract *key =
4486 &cls_cfg.extracts[cls_cfg.num_extracts];
4487
4488 - if (!(flags & hash_fields[i].rxnfc_field))
4489 + /* For both Rx hashing and classification keys
4490 + * we set only the selected fields.
4491 + */
4492 + if (!(flags & dist_fields[i].id))
4493 continue;
4494 + if (type == DPAA2_ETH_RX_DIST_HASH)
4495 + rx_hash_fields |= dist_fields[i].rxnfc_field;
4496
4497 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
4498 dev_err(dev, "error adding key extraction rule, too many rules?\n");
4499 @@ -2020,49 +2966,107 @@ static int dpaa2_eth_set_hash(struct net
4500 }
4501
4502 key->type = DPKG_EXTRACT_FROM_HDR;
4503 - key->extract.from_hdr.prot = hash_fields[i].cls_prot;
4504 + key->extract.from_hdr.prot = dist_fields[i].cls_prot;
4505 key->extract.from_hdr.type = DPKG_FULL_FIELD;
4506 - key->extract.from_hdr.field = hash_fields[i].cls_field;
4507 + key->extract.from_hdr.field = dist_fields[i].cls_field;
4508 cls_cfg.num_extracts++;
4509 + }
4510 +
4511 + dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4512 + if (!dma_mem)
4513 + return -ENOMEM;
4514 +
4515 + err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
4516 + if (err) {
4517 + dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4518 + goto free_key;
4519 + }
4520 +
4521 + /* Prepare for setting the rx dist */
4522 + key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
4523 + DMA_TO_DEVICE);
4524 + if (dma_mapping_error(dev, key_iova)) {
4525 + dev_err(dev, "DMA mapping failed\n");
4526 + err = -ENOMEM;
4527 + goto free_key;
4528 + }
4529 +
4530 + if (type == DPAA2_ETH_RX_DIST_HASH) {
4531 + if (dpaa2_eth_has_legacy_dist(priv))
4532 + err = config_legacy_hash_key(priv, key_iova);
4533 + else
4534 + err = config_hash_key(priv, key_iova);
4535 + } else {
4536 + err = config_cls_key(priv, key_iova);
4537 + }
4538 +
4539 + dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
4540 + DMA_TO_DEVICE);
4541 + if (!err && type == DPAA2_ETH_RX_DIST_HASH)
4542 + priv->rx_hash_fields = rx_hash_fields;
4543 +
4544 +free_key:
4545 + kfree(dma_mem);
4546 + return err;
4547 +}
4548 +
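+/* Translate the RXH_* flags from ethtool into our internal field ids
+ * and program the Rx hash key with the selected fields.
+ */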
4549 +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4550 +{
4551 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4552 + u64 key = 0;
4553 + int i;
4554 +
4555 + if (!dpaa2_eth_hash_enabled(priv))
4556 + return -EOPNOTSUPP;
4557 +
4558 + for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
4559 + if (dist_fields[i].rxnfc_field & flags)
4560 + key |= dist_fields[i].id;
4561 +
4562 + return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
4563 +}
4564 +
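+/* Program the Rx flow classification key; here flags is a combination
+ * of DPAA2_ETH_DIST_* field ids, not RXH_ bits.
+ */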
4565 +int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
4566 +{
4567 + return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
4568 +}
4569 +
4570 +static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
4571 +{
4572 + struct device *dev = priv->net_dev->dev.parent;
4573 + int err;
4574
4575 - priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
4576 + /* Check if we actually support Rx flow classification */
4577 + if (dpaa2_eth_has_legacy_dist(priv)) {
4578 + dev_dbg(dev, "Rx cls not supported by current MC version\n");
4579 + return -EOPNOTSUPP;
4580 }
4581
4582 - dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4583 - if (!dma_mem)
4584 - return -ENOMEM;
4585 -
4586 - err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
4587 - if (err) {
4588 - dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4589 - goto err_prep_key;
4590 + if (!dpaa2_eth_fs_enabled(priv)) {
4591 + dev_dbg(dev, "Rx cls disabled in DPNI options\n");
4592 + return -EOPNOTSUPP;
4593 }
4594
4595 - memset(&dist_cfg, 0, sizeof(dist_cfg));
4596 -
4597 - /* Prepare for setting the rx dist */
4598 - dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
4599 - DPAA2_CLASSIFIER_DMA_SIZE,
4600 - DMA_TO_DEVICE);
4601 - if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
4602 - dev_err(dev, "DMA mapping failed\n");
4603 - err = -ENOMEM;
4604 - goto err_dma_map;
4605 + if (!dpaa2_eth_hash_enabled(priv)) {
4606 + dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
4607 + return -EOPNOTSUPP;
4608 }
4609
4610 - dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4611 - dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
4612 + /* If there is no support for masking in the classification table,
4613 + * we don't set a default key, as it will depend on the rules
4614 + * added by the user at runtime.
4615 + */
4616 + if (!dpaa2_eth_fs_mask_enabled(priv))
4617 + goto out;
4618
4619 - err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
4620 - dma_unmap_single(dev, dist_cfg.key_cfg_iova,
4621 - DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
4622 + err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
4623 if (err)
4624 - dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
4625 + return err;
4626
4627 -err_dma_map:
4628 -err_prep_key:
4629 - kfree(dma_mem);
4630 - return err;
4631 +out:
4632 + priv->rx_cls_enabled = 1;
4633 +
4634 + return 0;
4635 }
4636
4637 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
4638 @@ -2080,6 +3084,7 @@ static int bind_dpni(struct dpaa2_eth_pr
4639 pools_params.num_dpbp = 1;
4640 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
4641 pools_params.pools[0].backup_pool = 0;
4642 + pools_params.pools[0].priority_mask = 0xff;
4643 pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
4644 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
4645 if (err) {
4646 @@ -2087,17 +3092,28 @@ static int bind_dpni(struct dpaa2_eth_pr
4647 return err;
4648 }
4649
4650 - /* have the interface implicitly distribute traffic based on supported
4651 - * header fields
4652 + /* have the interface implicitly distribute traffic based on
4653 + * the default hash key
4654 */
4655 - err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
4656 - if (err)
4657 - netdev_err(net_dev, "Failed to configure hashing\n");
4658 + err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
4659 + if (err && err != -EOPNOTSUPP)
4660 + dev_err(dev, "Failed to configure hashing\n");
4661 +
4662 + /* Configure the flow classification key; it includes all
4663 + * supported header fields and cannot be modified at runtime
4664 + */
4665 + err = dpaa2_eth_set_default_cls(priv);
4666 + if (err && err != -EOPNOTSUPP)
4667 + dev_err(dev, "Failed to configure Rx classification key\n");
4668
4669 /* Configure handling of error frames */
4670 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
4671 err_cfg.set_frame_annotation = 1;
4672 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
4673 + err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
4674 +#else
4675 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
4676 +#endif
4677 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
4678 &err_cfg);
4679 if (err) {
4680 @@ -2114,6 +3130,11 @@ static int bind_dpni(struct dpaa2_eth_pr
4681 case DPAA2_TX_CONF_FQ:
4682 err = setup_tx_flow(priv, &priv->fq[i]);
4683 break;
4684 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
4685 + case DPAA2_RX_ERR_FQ:
4686 + err = setup_rx_err_flow(priv, &priv->fq[i]);
4687 + break;
4688 +#endif
4689 default:
4690 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
4691 return -EINVAL;
4692 @@ -2237,11 +3258,14 @@ static int netdev_init(struct net_device
4693 {
4694 struct device *dev = net_dev->dev.parent;
4695 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4696 + u32 options = priv->dpni_attrs.options;
4697 + u64 supported = 0, not_supported = 0;
4698 u8 bcast_addr[ETH_ALEN];
4699 u8 num_queues;
4700 int err;
4701
4702 net_dev->netdev_ops = &dpaa2_eth_ops;
4703 + net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4704
4705 err = set_mac_addr(priv);
4706 if (err)
4707 @@ -2255,14 +3279,14 @@ static int netdev_init(struct net_device
4708 return err;
4709 }
4710
4711 - /* Reserve enough space to align buffer as per hardware requirement;
4712 - * NOTE: priv->tx_data_offset MUST be initialized at this point.
4713 - */
4714 - net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
4715 -
4716 - /* Set MTU limits */
4717 - net_dev->min_mtu = 68;
4718 + /* Set MTU upper limit; lower limit is 68B (default value) */
4719 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4720 + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4721 + DPAA2_ETH_MFL);
4722 + if (err) {
4723 + dev_err(dev, "dpni_set_max_frame_length() failed\n");
4724 + return err;
4725 + }
4726
4727 /* Set actual number of queues in the net device */
4728 num_queues = dpaa2_eth_queue_count(priv);
4729 @@ -2277,12 +3301,23 @@ static int netdev_init(struct net_device
4730 return err;
4731 }
4732
4733 - /* Our .ndo_init will be called herein */
4734 - err = register_netdev(net_dev);
4735 - if (err < 0) {
4736 - dev_err(dev, "register_netdev() failed\n");
4737 - return err;
4738 - }
4739 + /* Capabilities listing */
4740 + supported |= IFF_LIVE_ADDR_CHANGE;
4741 +
4742 + if (options & DPNI_OPT_NO_MAC_FILTER)
4743 + not_supported |= IFF_UNICAST_FLT;
4744 + else
4745 + supported |= IFF_UNICAST_FLT;
4746 +
4747 + net_dev->priv_flags |= supported;
4748 + net_dev->priv_flags &= ~not_supported;
4749 +
4750 + /* Features */
4751 + net_dev->features = NETIF_F_RXCSUM |
4752 + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4753 + NETIF_F_SG | NETIF_F_HIGHDMA |
4754 + NETIF_F_LLTX;
4755 + net_dev->hw_features = net_dev->features;
4756
4757 return 0;
4758 }
4759 @@ -2303,14 +3338,9 @@ static int poll_link_state(void *arg)
4760 return 0;
4761 }
4762
4763 -static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
4764 -{
4765 - return IRQ_WAKE_THREAD;
4766 -}
4767 -
4768 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4769 {
4770 - u32 status = 0, clear = 0;
4771 + u32 status = ~0;
4772 struct device *dev = (struct device *)arg;
4773 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4774 struct net_device *net_dev = dev_get_drvdata(dev);
4775 @@ -2320,18 +3350,12 @@ static irqreturn_t dpni_irq0_handler_thr
4776 DPNI_IRQ_INDEX, &status);
4777 if (unlikely(err)) {
4778 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
4779 - clear = 0xffffffff;
4780 - goto out;
4781 + return IRQ_HANDLED;
4782 }
4783
4784 - if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
4785 - clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
4786 + if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
4787 link_state_update(netdev_priv(net_dev));
4788 - }
4789
4790 -out:
4791 - dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4792 - DPNI_IRQ_INDEX, clear);
4793 return IRQ_HANDLED;
4794 }
4795
4796 @@ -2348,8 +3372,7 @@ static int setup_irqs(struct fsl_mc_devi
4797
4798 irq = ls_dev->irqs[0];
4799 err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
4800 - dpni_irq0_handler,
4801 - dpni_irq0_handler_thread,
4802 + NULL, dpni_irq0_handler_thread,
4803 IRQF_NO_SUSPEND | IRQF_ONESHOT,
4804 dev_name(&ls_dev->dev), &ls_dev->dev);
4805 if (err < 0) {
4806 @@ -2405,6 +3428,393 @@ static void del_ch_napi(struct dpaa2_eth
4807 }
4808 }
4809
4810 +/* SysFS support */
4811 +static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
4812 + struct device_attribute *attr,
4813 + char *buf)
4814 +{
4815 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4816 +	/* No MC API for reading back the shaping config; report the cached values */
4817 + struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
4818 +
4819 + return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
4820 +}
4821 +
4822 +static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
4823 + struct device_attribute *attr,
4824 + const char *buf,
4825 + size_t count)
4826 +{
4827 + int err, items;
4828 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4829 + struct dpni_tx_shaping_cfg scfg, ercfg = { 0 };
4830 +
4831 + items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
4832 + if (items != 2) {
4833 + pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
4834 + return -EINVAL;
4835 + }
4836 + /* Size restriction as per MC API documentation */
4837 + if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
4838 + pr_err("max_burst_size must be <= %d\n",
4839 + DPAA2_ETH_MAX_BURST_SIZE);
4840 + return -EINVAL;
4841 + }
4842 +
4843 + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg,
4844 + &ercfg, 0);
4845 + if (err) {
4846 + dev_err(dev, "dpni_set_tx_shaping() failed\n");
4847 + return -EPERM;
4848 + }
4849 + /* If successful, save the current configuration for future inquiries */
4850 + priv->shaping_cfg = scfg;
4851 +
4852 + return count;
4853 +}
4854 +
4855 +static struct device_attribute dpaa2_eth_attrs[] = {
4856 + __ATTR(tx_shaping,
4857 + 0600,
4858 + dpaa2_eth_show_tx_shaping,
4859 + dpaa2_eth_write_tx_shaping),
4860 +};
4861 +
4862 +static void dpaa2_eth_sysfs_init(struct device *dev)
4863 +{
4864 + int i, err;
4865 +
4866 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
4867 + err = device_create_file(dev, &dpaa2_eth_attrs[i]);
4868 + if (err) {
4869 + dev_err(dev, "ERROR creating sysfs file\n");
4870 + goto undo;
4871 + }
4872 + }
4873 + return;
4874 +
4875 +undo:
4876 + while (i > 0)
4877 + device_remove_file(dev, &dpaa2_eth_attrs[--i]);
4878 +}
4879 +
4880 +static void dpaa2_eth_sysfs_remove(struct device *dev)
4881 +{
4882 + int i;
4883 +
4884 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
4885 + device_remove_file(dev, &dpaa2_eth_attrs[i]);
4886 +}
4887 +
4888 +#ifdef CONFIG_FSL_DPAA2_ETH_DCB
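+/* Rebuild the PFC enabled mask: a traffic class has PFC on if the link
+ * has the PFC pause option set and congestion notifications are
+ * configured for that class.
+ */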
4889 +static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
4890 + struct ieee_pfc *pfc)
4891 +{
4892 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4893 + struct dpni_congestion_notification_cfg notification_cfg;
4894 + struct dpni_link_state state;
4895 + int err, i;
4896 +
4897 + priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv);
4898 +
4899 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4900 + if (err) {
4901 +		netdev_err(net_dev, "ERROR %d getting link state\n", err);
4902 + return err;
4903 + }
4904 +
4905 + if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
4906 + return 0;
4907 +
4908 + priv->pfc.pfc_en = 0;
4909 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4910 + err = dpni_get_congestion_notification(priv->mc_io, 0,
4911 + priv->mc_token,
4912 + DPNI_QUEUE_RX,
4913 + i, &notification_cfg);
4914 + if (err) {
4915 +			netdev_err(net_dev, "Error %d getting congestion notif\n",
4916 + err);
4917 + return err;
4918 + }
4919 +
4920 + if (notification_cfg.threshold_entry)
4921 + priv->pfc.pfc_en |= 1 << i;
4922 + }
4923 +
4924 + memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
4925 +
4926 + return 0;
4927 +}
4928 +
4929 +/* Configure ingress classification based on VLAN PCP */
4930 +static int set_vlan_qos(struct dpaa2_eth_priv *priv)
4931 +{
4932 + struct device *dev = priv->net_dev->dev.parent;
4933 + struct dpkg_profile_cfg kg_cfg = {0};
4934 + struct dpni_qos_tbl_cfg qos_cfg = {0};
4935 + struct dpni_rule_cfg key_params;
4936 + u8 *params_iova, *key, *mask = NULL;
4937 + /* We only need the trailing 16 bits, without the TPID */
4938 + u8 key_size = VLAN_HLEN / 2;
4939 + int err = 0, i, j = 0;
4940 +
4941 + if (priv->vlan_clsf_set)
4942 + return 0;
4943 +
4944 + params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4945 + if (!params_iova)
4946 + return -ENOMEM;
4947 +
4948 + kg_cfg.num_extracts = 1;
4949 + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
4950 + kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
4951 + kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
4952 + kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
4953 +
4954 + err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
4955 + if (err) {
4956 +		dev_err(dev, "dpni_prepare_key_cfg failed: %d\n", err);
4957 + goto out_free;
4958 + }
4959 +
4960 + /* Set QoS table */
4961 + qos_cfg.default_tc = 0;
4962 + qos_cfg.discard_on_miss = 0;
4963 + qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
4964 + DPAA2_CLASSIFIER_DMA_SIZE,
4965 + DMA_TO_DEVICE);
4966 + if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
4967 + dev_err(dev, "%s: DMA mapping failed\n", __func__);
4968 + err = -ENOMEM;
4969 + goto out_free;
4970 + }
4971 + err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
4972 + dma_unmap_single(dev, qos_cfg.key_cfg_iova,
4973 + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
4974 +
4975 + if (err) {
4976 + dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
4977 + goto out_free;
4978 + }
4979 +
4980 + key_params.key_size = key_size;
4981 +
4982 + if (dpaa2_eth_fs_mask_enabled(priv)) {
4983 + mask = kzalloc(key_size, GFP_KERNEL);
4984 +		if (!mask) {
+			err = -ENOMEM;
4985 +			goto out_free;
+		}
4986 +
4987 +		*(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
4988 +
4989 + key_params.mask_iova = dma_map_single(dev, mask, key_size,
4990 + DMA_TO_DEVICE);
4991 + if (dma_mapping_error(dev, key_params.mask_iova)) {
4992 +			dev_err(dev, "%s: DMA mapping failed\n", __func__);
4993 + err = -ENOMEM;
4994 + goto out_free_mask;
4995 + }
4996 + } else {
4997 + key_params.mask_iova = 0;
4998 + }
4999 +
5000 + key = kzalloc(key_size, GFP_KERNEL);
5001 +	if (!key) {
+		err = -ENOMEM;
5002 +		goto out_cleanup_mask;
+	}
5003 +
5004 + key_params.key_iova = dma_map_single(dev, key, key_size,
5005 + DMA_TO_DEVICE);
5006 + if (dma_mapping_error(dev, key_params.key_iova)) {
5007 + dev_err(dev, "%s: DMA mapping failed\n", __func__);
5008 + err = -ENOMEM;
5009 + goto out_free_key;
5010 + }
5011 +
5012 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
5013 +		*(__be16 *)key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
5014 +
5015 + dma_sync_single_for_device(dev, key_params.key_iova,
5016 + key_size, DMA_TO_DEVICE);
5017 +
5018 + err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
5019 + &key_params, i, j++);
5020 + if (err) {
5021 + dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
5022 + goto out_remove;
5023 + }
5024 + }
5025 +
5026 + priv->vlan_clsf_set = true;
5027 +	dev_dbg(dev, "VLAN PCP QoS classification set\n");
5028 + goto out_cleanup;
5029 +
5030 +out_remove:
5031 + for (j = 0; j < i; j++) {
5032 +		*(__be16 *)key = cpu_to_be16(j << VLAN_PRIO_SHIFT);
5033 +
5034 + dma_sync_single_for_device(dev, key_params.key_iova, key_size,
5035 + DMA_TO_DEVICE);
5036 +
5037 + err = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token,
5038 + &key_params);
5039 + if (err)
5040 + dev_err(dev, "dpni_remove_qos_entry failed: %d\n", err);
5041 + }
5042 +
5043 +out_cleanup:
5044 + dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE);
5045 +out_free_key:
5046 + kfree(key);
5047 +out_cleanup_mask:
5048 + if (key_params.mask_iova)
5049 + dma_unmap_single(dev, key_params.mask_iova, key_size,
5050 + DMA_TO_DEVICE);
5051 +out_free_mask:
5052 + kfree(mask);
5053 +out_free:
5054 + kfree(params_iova);
5055 + return err;
5056 +}
5057 +
5058 +static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
5059 + struct ieee_pfc *pfc)
5060 +{
5061 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5062 + struct dpni_congestion_notification_cfg notification_cfg = {0};
5063 + struct dpni_link_state state = {0};
5064 + struct dpni_link_cfg cfg = {0};
5065 + struct ieee_pfc old_pfc;
5066 + int err = 0, i;
5067 +
5068 + if (dpaa2_eth_tc_count(priv) == 1) {
5069 + netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n");
5070 + return 0;
5071 + }
5072 +
5073 +	/* Zero out pfc_en bits for priorities at or above tc_count */
5074 + pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1;
5075 +
5076 + if (priv->pfc.pfc_en == pfc->pfc_en)
5077 + /* Same enabled mask, nothing to be done */
5078 + return 0;
5079 +
5080 + err = set_vlan_qos(priv);
5081 + if (err)
5082 + return err;
5083 +
5084 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5085 + if (err) {
5086 +		netdev_err(net_dev, "ERROR %d getting link state\n", err);
5087 + return err;
5088 + }
5089 +
5090 + cfg.rate = state.rate;
5091 + cfg.options = state.options;
5092 + if (pfc->pfc_en)
5093 + cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
5094 + else
5095 + cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
5096 +
5097 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
5098 + if (err) {
5099 +		netdev_err(net_dev, "ERROR %d setting link cfg\n", err);
5100 + return err;
5101 + }
5102 +
5103 + memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc));
5104 + memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
5105 +
5106 + err = set_rx_taildrop(priv);
5107 + if (err)
5108 + goto out_restore_config;
5109 +
5110 + /* configure congestion notifications */
5111 + notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
5112 + notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
5113 + notification_cfg.message_iova = 0ULL;
5114 + notification_cfg.message_ctx = 0ULL;
5115 +
5116 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
5117 + if (dpaa2_eth_is_pfc_enabled(priv, i)) {
5118 + notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
5119 + notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
5120 + } else {
5121 + notification_cfg.threshold_entry = 0;
5122 + notification_cfg.threshold_exit = 0;
5123 + }
5124 +
5125 + err = dpni_set_congestion_notification(priv->mc_io, 0,
5126 + priv->mc_token,
5127 + DPNI_QUEUE_RX,
5128 + i, &notification_cfg);
5129 + if (err) {
5130 +			netdev_err(net_dev, "Error %d setting congestion notif\n",
5131 + err);
5132 + goto out_restore_config;
5133 + }
5134 +
5135 + netdev_dbg(net_dev, "%s congestion notifications for tc %d\n",
5136 + (notification_cfg.threshold_entry ?
5137 + "Enabled" : "Disabled"), i);
5138 + }
5139 +
5140 + return 0;
5141 +
5142 +out_restore_config:
5143 + memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc));
5144 + return err;
5145 +}
5146 +
5147 +static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
5148 +{
5149 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5150 +
5151 + return priv->dcbx_mode;
5152 +}
5153 +
5154 +static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
5155 +{
5156 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5157 +
5158 + priv->dcbx_mode = mode;
5159 + return 0;
5160 +}
5161 +
5162 +static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
5163 +{
5164 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5165 +
5166 + switch (capid) {
5167 + case DCB_CAP_ATTR_PFC:
5168 + *cap = true;
5169 + break;
5170 + case DCB_CAP_ATTR_PFC_TCS:
5171 + /* bitmap where each bit represents a number of traffic
5172 + * classes the device can be configured to use for Priority
5173 + * Flow Control
5174 + */
5175 + *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
5176 + break;
5177 + case DCB_CAP_ATTR_DCBX:
5178 + *cap = priv->dcbx_mode;
5179 + break;
5180 + default:
5181 + *cap = false;
5182 + break;
5183 + }
5184 +
5185 + return 0;
5186 +}
5187 +
5188 +const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
5189 + .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
5190 + .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
5191 + .getdcbx = dpaa2_eth_dcbnl_getdcbx,
5192 + .setdcbx = dpaa2_eth_dcbnl_setdcbx,
5193 + .getcap = dpaa2_eth_dcbnl_getcap,
5194 +};
5195 +#endif
5196 +
5197 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
5198 {
5199 struct device *dev;
5200 @@ -2415,7 +3825,7 @@ static int dpaa2_eth_probe(struct fsl_mc
5201 dev = &dpni_dev->dev;
5202
5203 /* Net device */
5204 - net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
5205 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
5206 if (!net_dev) {
5207 dev_err(dev, "alloc_etherdev_mq() failed\n");
5208 return -ENOMEM;
5209 @@ -2433,7 +3843,10 @@ static int dpaa2_eth_probe(struct fsl_mc
5210 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
5211 &priv->mc_io);
5212 if (err) {
5213 - dev_err(dev, "MC portal allocation failed\n");
5214 + if (err == -ENXIO)
5215 + err = -EPROBE_DEFER;
5216 + else
5217 + dev_err(dev, "MC portal allocation failed\n");
5218 goto err_portal_alloc;
5219 }
5220
5221 @@ -2456,9 +3869,6 @@ static int dpaa2_eth_probe(struct fsl_mc
5222 if (err)
5223 goto err_bind;
5224
5225 - /* Add a NAPI context for each channel */
5226 - add_ch_napi(priv);
5227 -
5228 /* Percpu statistics */
5229 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
5230 if (!priv->percpu_stats) {
5231 @@ -2491,7 +3901,14 @@ static int dpaa2_eth_probe(struct fsl_mc
5232 if (err)
5233 goto err_alloc_rings;
5234
5235 - net_dev->ethtool_ops = &dpaa2_ethtool_ops;
5236 +#ifdef CONFIG_FSL_DPAA2_ETH_DCB
5237 + net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
5238 + priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
5239 +#endif
5240 +
5241 + /* Add a NAPI context for each channel */
5242 + add_ch_napi(priv);
5243 + enable_ch_napi(priv);
5244
5245 err = setup_irqs(dpni_dev);
5246 if (err) {
5247 @@ -2499,25 +3916,41 @@ static int dpaa2_eth_probe(struct fsl_mc
5248 priv->poll_thread = kthread_run(poll_link_state, priv,
5249 "%s_poll_link", net_dev->name);
5250 if (IS_ERR(priv->poll_thread)) {
5251 - netdev_err(net_dev, "Error starting polling thread\n");
5252 + dev_err(dev, "Error starting polling thread\n");
5253 goto err_poll_thread;
5254 }
5255 priv->do_link_poll = true;
5256 }
5257
5258 + err = register_netdev(net_dev);
5259 + if (err < 0) {
5260 + dev_err(dev, "register_netdev() failed\n");
5261 + goto err_netdev_reg;
5262 + }
5263 +
5264 + dpaa2_eth_sysfs_init(&net_dev->dev);
5265 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
5266 + dpaa2_dbg_add(priv);
5267 +#endif
5268 +
5269 dev_info(dev, "Probed interface %s\n", net_dev->name);
5270 return 0;
5271
5272 +err_netdev_reg:
5273 + if (priv->do_link_poll)
5274 + kthread_stop(priv->poll_thread);
5275 + else
5276 + fsl_mc_free_irqs(dpni_dev);
5277 err_poll_thread:
5278 free_rings(priv);
5279 err_alloc_rings:
5280 err_csum:
5281 - unregister_netdev(net_dev);
5282 err_netdev_init:
5283 free_percpu(priv->percpu_extras);
5284 err_alloc_percpu_extras:
5285 free_percpu(priv->percpu_stats);
5286 err_alloc_percpu_stats:
5287 + disable_ch_napi(priv);
5288 del_ch_napi(priv);
5289 err_bind:
5290 free_dpbp(priv);
5291 @@ -2544,8 +3977,15 @@ static int dpaa2_eth_remove(struct fsl_m
5292 net_dev = dev_get_drvdata(dev);
5293 priv = netdev_priv(net_dev);
5294
5295 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
5296 + dpaa2_dbg_remove(priv);
5297 +#endif
5298 + dpaa2_eth_sysfs_remove(&net_dev->dev);
5299 +
5300 unregister_netdev(net_dev);
5301 - dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
5302 +
5303 + disable_ch_napi(priv);
5304 + del_ch_napi(priv);
5305
5306 if (priv->do_link_poll)
5307 kthread_stop(priv->poll_thread);
5308 @@ -2555,17 +3995,16 @@ static int dpaa2_eth_remove(struct fsl_m
5309 free_rings(priv);
5310 free_percpu(priv->percpu_stats);
5311 free_percpu(priv->percpu_extras);
5312 -
5313 - del_ch_napi(priv);
5314 free_dpbp(priv);
5315 free_dpio(priv);
5316 free_dpni(priv);
5317
5318 fsl_mc_portal_free(priv->mc_io);
5319
5320 - dev_set_drvdata(dev, NULL);
5321 +	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
5322 +
5323 	free_netdev(net_dev);
5324 
5325 return 0;
5326 }
5327
5328 @@ -2588,4 +4027,34 @@ static struct fsl_mc_driver dpaa2_eth_dr
5329 .match_id_table = dpaa2_eth_match_id_table
5330 };
5331
5332 -module_fsl_mc_driver(dpaa2_eth_driver);
5333 +static int __init dpaa2_eth_driver_init(void)
5334 +{
5335 + int err;
5336 +
5337 + dpaa2_eth_dbg_init();
5338 + err = fsl_mc_driver_register(&dpaa2_eth_driver);
5339 + if (err)
5340 + goto out_debugfs_err;
5341 +
5342 + err = dpaa2_ceetm_register();
5343 + if (err)
5344 + goto out_ceetm_err;
5345 +
5346 + return 0;
5347 +
5348 +out_ceetm_err:
5349 + fsl_mc_driver_unregister(&dpaa2_eth_driver);
5350 +out_debugfs_err:
5351 + dpaa2_eth_dbg_exit();
5352 + return err;
5353 +}
5354 +
5355 +static void __exit dpaa2_eth_driver_exit(void)
5356 +{
5357 + dpaa2_ceetm_unregister();
5358 + fsl_mc_driver_unregister(&dpaa2_eth_driver);
5359 + dpaa2_eth_dbg_exit();
5360 +}
5361 +
5362 +module_init(dpaa2_eth_driver_init);
5363 +module_exit(dpaa2_eth_driver_exit);
5364 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
5365 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
5366 @@ -1,40 +1,15 @@
5367 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
5368 /* Copyright 2014-2016 Freescale Semiconductor Inc.
5369 * Copyright 2016 NXP
5370 - *
5371 - * Redistribution and use in source and binary forms, with or without
5372 - * modification, are permitted provided that the following conditions are met:
5373 - * * Redistributions of source code must retain the above copyright
5374 - * notice, this list of conditions and the following disclaimer.
5375 - * * Redistributions in binary form must reproduce the above copyright
5376 - * notice, this list of conditions and the following disclaimer in the
5377 - * documentation and/or other materials provided with the distribution.
5378 - * * Neither the name of Freescale Semiconductor nor the
5379 - * names of its contributors may be used to endorse or promote products
5380 - * derived from this software without specific prior written permission.
5381 - *
5382 - *
5383 - * ALTERNATIVELY, this software may be distributed under the terms of the
5384 - * GNU General Public License ("GPL") as published by the Free Software
5385 - * Foundation, either version 2 of that License or (at your option) any
5386 - * later version.
5387 - *
5388 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5389 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5390 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5391 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5392 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5393 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5394 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5395 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5396 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5397 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5398 */
5399
5400 #ifndef __DPAA2_ETH_H
5401 #define __DPAA2_ETH_H
5402
5403 +#include <linux/dcbnl.h>
5404 #include <linux/netdevice.h>
5405 #include <linux/if_vlan.h>
5406 +#include <linux/filter.h>
5407
5408 #include "../../fsl-mc/include/dpaa2-io.h"
5409 #include "../../fsl-mc/include/dpaa2-fd.h"
5410 @@ -44,6 +19,9 @@
5411 #include "dpni-cmd.h"
5412
5413 #include "dpaa2-eth-trace.h"
5414 +#include "dpaa2-eth-debugfs.h"
5415 +
5416 +#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
5417
5418 #define DPAA2_ETH_STORE_SIZE 16
5419
5420 @@ -60,43 +38,59 @@
5421 /* Convert L3 MTU to L2 MFL */
5422 #define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
5423
5424 -/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
5425 - * frames in the Rx queues (length of the current frame is not
5426 - * taken into account when making the taildrop decision)
5427 - */
5428 -#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
5429 -
5430 -/* Buffer quota per queue. Must be large enough such that for minimum sized
5431 - * frames taildrop kicks in before the bpool gets depleted, so we compute
5432 - * how many 64B frames fit inside the taildrop threshold and add a margin
5433 - * to accommodate the buffer refill delay.
5434 - */
5435 -#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
5436 -#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
5437 -#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE
5438 +/* Maximum burst size value for Tx shaping */
5439 +#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
5440
5441 /* Maximum number of buffers that can be acquired/released through a single
5442 * QBMan command
5443 */
5444 #define DPAA2_ETH_BUFS_PER_CMD 7
5445
5446 -/* Hardware requires alignment for ingress/egress buffer addresses
5447 - * and ingress buffer lengths.
5448 +/* Set the taildrop threshold to 1MB to allow the enqueue of a sufficiently
5449 + * large number of jumbo frames in the Rx queues (length of the current frame
5450 + * is not taken into account when making the taildrop decision)
5451 + */
5452 +#define DPAA2_ETH_TAILDROP_THRESH (1024 * 1024)
5453 +
5454 +/* Maximum number of Tx confirmation frames to be processed
5455 + * in a single NAPI call
5456 + */
5457 +#define DPAA2_ETH_TXCONF_PER_NAPI 256
5458 +
5459 +/* Buffer quota per channel.
5460 + * We want to keep the number of ingress frames in flight in check: for small
5461 + * sized frames, buffer pool depletion will kick in first; for large sizes,
5462 + * Rx FQ taildrop threshold will ensure only a reasonable number of frames
5463 + * will be pending at any given time.
5464 */
5465 -#define DPAA2_ETH_RX_BUF_SIZE 2048
5466 +#define DPAA2_ETH_NUM_BUFS_PER_CH 1024
5467 +#define DPAA2_ETH_REFILL_THRESH(priv) \
5468 + ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
5469 +
5470 +/* Global buffer quota in case flow control is enabled */
5471 +#define DPAA2_ETH_NUM_BUFS_FC 256
5472 +
5473 +/* Hardware requires alignment for ingress/egress buffer addresses */
5474 #define DPAA2_ETH_TX_BUF_ALIGN 64
5475 -#define DPAA2_ETH_RX_BUF_ALIGN 256
5476 -#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
5477 - ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
5478 -
5479 -/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
5480 - * buffers large enough to allow building an skb around them and also account
5481 - * for alignment restrictions
5482 - */
5483 -#define DPAA2_ETH_BUF_RAW_SIZE \
5484 - (DPAA2_ETH_RX_BUF_SIZE + \
5485 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
5486 - DPAA2_ETH_RX_BUF_ALIGN)
5487 +
5488 +#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE
5489 +#define DPAA2_ETH_RX_BUF_TAILROOM \
5490 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
5491 +#define DPAA2_ETH_RX_BUF_SIZE \
5492 + (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
5493 +
5494 +/* Hardware annotation area in RX/TX buffers */
5495 +#define DPAA2_ETH_RX_HWA_SIZE 64
5496 +#define DPAA2_ETH_TX_HWA_SIZE 128
5497 +
5498 +/* PTP nominal frequency 1GHz */
5499 +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
5500 +
5501 +/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
5502 + * to 256B. For newer revisions, the requirement is only for 64B alignment
5503 + */
5504 +#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
5505 +#define DPAA2_ETH_RX_BUF_ALIGN 64
5506
5507 /* We are accommodating a skb backpointer and some S/G info
5508 * in the frame's software annotation. The hardware
5509 @@ -104,12 +98,32 @@
5510 */
5511 #define DPAA2_ETH_SWA_SIZE 64
5512
5513 +/* We store different information in the software annotation area of a Tx frame
5514 + * based on what type of frame it is
5515 + */
5516 +enum dpaa2_eth_swa_type {
5517 + DPAA2_ETH_SWA_SINGLE,
5518 + DPAA2_ETH_SWA_SG,
5519 + DPAA2_ETH_SWA_XDP,
5520 +};
5521 +
5522 /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
5523 struct dpaa2_eth_swa {
5524 - struct sk_buff *skb;
5525 - struct scatterlist *scl;
5526 - int num_sg;
5527 - int num_dma_bufs;
5528 + enum dpaa2_eth_swa_type type;
5529 + union {
5530 + struct {
5531 + struct sk_buff *skb;
5532 + } single;
5533 + struct {
5534 + struct sk_buff *skb;
5535 + struct scatterlist *scl;
5536 + int num_sg;
5537 + int sgt_size;
5538 + } sg;
5539 + struct {
5540 + int dma_size;
5541 + } xdp;
5542 + };
5543 };
5544
5545 /* Annotation valid bits in FD FRC */
5546 @@ -121,22 +135,14 @@ struct dpaa2_eth_swa {
5547 #define DPAA2_FD_FRC_FAICFDV 0x0400
5548
5549 /* Error bits in FD CTRL */
5550 -#define DPAA2_FD_CTRL_UFD 0x00000004
5551 -#define DPAA2_FD_CTRL_SBE 0x00000008
5552 -#define DPAA2_FD_CTRL_FSE 0x00000020
5553 -#define DPAA2_FD_CTRL_FAERR 0x00000040
5554 -
5555 -#define DPAA2_FD_RX_ERR_MASK (DPAA2_FD_CTRL_SBE | \
5556 - DPAA2_FD_CTRL_FAERR)
5557 -#define DPAA2_FD_TX_ERR_MASK (DPAA2_FD_CTRL_UFD | \
5558 - DPAA2_FD_CTRL_SBE | \
5559 - DPAA2_FD_CTRL_FSE | \
5560 - DPAA2_FD_CTRL_FAERR)
5561 +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
5562 +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
5563 + FD_CTRL_SBE | \
5564 + FD_CTRL_FSE | \
5565 + FD_CTRL_FAERR)
5566
5567 /* Annotation bits in FD CTRL */
5568 -#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
5569 -#define DPAA2_FD_CTRL_PTA 0x00800000
5570 -#define DPAA2_FD_CTRL_PTV1 0x00400000
5571 +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128B */
5572
5573 /* Frame annotation status */
5574 struct dpaa2_fas {
5575 @@ -144,7 +150,7 @@ struct dpaa2_fas {
5576 u8 ppid;
5577 __le16 ifpid;
5578 __le32 status;
5579 -} __packed;
5580 +};
5581
5582 /* Frame annotation status word is located in the first 8 bytes
5583  * of the buffer's hardware annotation area
5584 @@ -152,11 +158,45 @@ struct dpaa2_fas {
5585 #define DPAA2_FAS_OFFSET 0
5586 #define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
5587
5588 +/* Timestamp is located in the next 8 bytes of the buffer's
5589 + * hardware annotation area
5590 + */
5591 +#define DPAA2_TS_OFFSET 0x8
5592 +
5593 +/* Frame annotation egress action descriptor */
5594 +#define DPAA2_FAEAD_OFFSET 0x58
5595 +
5596 +struct dpaa2_faead {
5597 + __le32 conf_fqid;
5598 + __le32 ctrl;
5599 +};
5600 +
5601 +#define DPAA2_FAEAD_A2V 0x20000000
5602 +#define DPAA2_FAEAD_A4V 0x08000000
5603 +#define DPAA2_FAEAD_UPDV 0x00001000
5604 +#define DPAA2_FAEAD_EBDDV 0x00002000
5605 +#define DPAA2_FAEAD_UPD 0x00000010
5606 +
5607 /* Accessors for the hardware annotation fields that we use */
5608 -#define dpaa2_get_hwa(buf_addr) \
5609 - ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
5610 -#define dpaa2_get_fas(buf_addr) \
5611 - (struct dpaa2_fas *)(dpaa2_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
5612 +static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
5613 +{
5614 + return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
5615 +}
5616 +
5617 +static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
5618 +{
5619 + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
5620 +}
5621 +
5622 +static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
5623 +{
5624 + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
5625 +}
5626 +
5627 +static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
5628 +{
5629 + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
5630 +}
5631
5632 /* Error and status bits in the frame annotation status word */
5633 /* Debug frame, otherwise supposed to be discarded */
5634 @@ -203,11 +243,6 @@ struct dpaa2_fas {
5635 DPAA2_FAS_BLE | \
5636 DPAA2_FAS_L3CE | \
5637 DPAA2_FAS_L4CE)
5638 -/* Tx errors */
5639 -#define DPAA2_FAS_TX_ERR_MASK (DPAA2_FAS_KSE | \
5640 - DPAA2_FAS_EOFHE | \
5641 - DPAA2_FAS_MNLE | \
5642 - DPAA2_FAS_TIDE)
5643
5644 /* Time in milliseconds between link state updates */
5645 #define DPAA2_ETH_LINK_STATE_REFRESH 1000
5646 @@ -226,6 +261,7 @@ struct dpaa2_eth_drv_stats {
5647 __u64 tx_conf_bytes;
5648 __u64 tx_sg_frames;
5649 __u64 tx_sg_bytes;
5650 + __u64 tx_reallocs;
5651 __u64 rx_sg_frames;
5652 __u64 rx_sg_bytes;
5653 /* Enqueues retried due to portal busy */
5654 @@ -250,17 +286,23 @@ struct dpaa2_eth_ch_stats {
5655 __u64 pull_err;
5656 };
5657
5658 +#define DPAA2_ETH_MAX_TCS 8
5659 +
5660 /* Maximum number of queues associated with a DPNI */
5661 -#define DPAA2_ETH_MAX_RX_QUEUES 16
5662 -#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
5663 +#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
5664 +#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS
5665 +#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
5666 #define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
5667 - DPAA2_ETH_MAX_TX_QUEUES)
5668 + DPAA2_ETH_MAX_TX_QUEUES + \
5669 + DPAA2_ETH_MAX_RX_ERR_QUEUES)
5670 +#define DPAA2_ETH_MAX_NETDEV_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
5671
5672 -#define DPAA2_ETH_MAX_DPCONS NR_CPUS
5673 +#define DPAA2_ETH_MAX_DPCONS 16
5674
5675 enum dpaa2_eth_fq_type {
5676 DPAA2_RX_FQ = 0,
5677 DPAA2_TX_CONF_FQ,
5678 + DPAA2_RX_ERR_FQ
5679 };
5680
5681 struct dpaa2_eth_priv;
5682 @@ -268,15 +310,19 @@ struct dpaa2_eth_priv;
5683 struct dpaa2_eth_fq {
5684 u32 fqid;
5685 u32 tx_qdbin;
5686 + u32 tx_fqid;
5687 u16 flowid;
5688 + u8 tc;
5689 int target_cpu;
5690 + u32 dq_frames;
5691 + u32 dq_bytes;
5692 struct dpaa2_eth_channel *channel;
5693 enum dpaa2_eth_fq_type type;
5694
5695 - void (*consume)(struct dpaa2_eth_priv *,
5696 - struct dpaa2_eth_channel *,
5697 - const struct dpaa2_fd *,
5698 - struct napi_struct *);
5699 + void (*consume)(struct dpaa2_eth_priv *priv,
5700 + struct dpaa2_eth_channel *ch,
5701 + const struct dpaa2_fd *fd,
5702 + struct dpaa2_eth_fq *fq);
5703 struct dpaa2_eth_fq_stats stats;
5704 };
5705
5706 @@ -285,19 +331,29 @@ struct dpaa2_eth_channel {
5707 struct fsl_mc_device *dpcon;
5708 int dpcon_id;
5709 int ch_id;
5710 - int dpio_id;
5711 struct napi_struct napi;
5712 + struct dpaa2_io *dpio;
5713 struct dpaa2_io_store *store;
5714 struct dpaa2_eth_priv *priv;
5715 int buf_count;
5716 struct dpaa2_eth_ch_stats stats;
5717 + struct bpf_prog *xdp_prog;
5718 + u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
5719 + u8 rel_buf_cnt;
5720 + bool flush;
5721 };
5722
5723 -struct dpaa2_eth_hash_fields {
5724 +struct dpaa2_eth_dist_fields {
5725 u64 rxnfc_field;
5726 enum net_prot cls_prot;
5727 int cls_field;
5728 int size;
5729 + u64 id;
5730 +};
5731 +
5732 +struct dpaa2_eth_cls_rule {
5733 + struct ethtool_rx_flow_spec fs;
5734 + u8 in_use;
5735 };
5736
5737 /* Driver private data */
5738 @@ -306,17 +362,29 @@ struct dpaa2_eth_priv {
5739
5740 u8 num_fqs;
5741 struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
5742 + int (*enqueue)(struct dpaa2_eth_priv *priv,
5743 + struct dpaa2_eth_fq *fq,
5744 + struct dpaa2_fd *fd, u8 prio);
5745
5746 u8 num_channels;
5747 struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
5748 + int max_bufs_per_ch;
5749 + int refill_thresh;
5750 +
5751 + bool has_xdp_prog;
5752
5753 struct dpni_attr dpni_attrs;
5754 + u16 dpni_ver_major;
5755 + u16 dpni_ver_minor;
5756 u16 tx_data_offset;
5757
5758 struct fsl_mc_device *dpbp_dev;
5759 u16 bpid;
5760 struct iommu_domain *iommu_domain;
5761
5762 + bool ts_tx_en; /* Tx timestamping enabled */
5763 + bool ts_rx_en; /* Rx timestamping enabled */
5764 +
5765 u16 tx_qdid;
5766 struct fsl_mc_io *mc_io;
5767 /* Cores which have an affine DPIO/DPCON.
5768 @@ -337,13 +405,30 @@ struct dpaa2_eth_priv {
5769
5770 /* enabled ethtool hashing bits */
5771 u64 rx_hash_fields;
5772 + u64 rx_cls_fields;
5773 + struct dpaa2_eth_cls_rule *cls_rule;
5774 + u8 rx_cls_enabled;
5775 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
5776 + struct dpaa2_debugfs dbg;
5777 +#endif
5778 + struct dpni_tx_shaping_cfg shaping_cfg;
5779 +
5780 + u8 dcbx_mode;
5781 + struct ieee_pfc pfc;
5782 + bool vlan_clsf_set;
5783 + bool tx_pause_frames;
5784 +
5785 + bool ceetm_en;
5786 };
5787
5788 -/* default Rx hash options, set during probing */
5789 #define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
5790 | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
5791 | RXH_L4_B_2_3)
5792
5793 +/* default Rx hash options, set during probing */
5794 +#define DPAA2_RXH_DEFAULT (RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
5795 + RXH_L4_B_0_1 | RXH_L4_B_2_3)
5796 +
5797 #define dpaa2_eth_hash_enabled(priv) \
5798 ((priv)->dpni_attrs.num_queues > 1)
5799
5800 @@ -352,10 +437,127 @@ struct dpaa2_eth_priv {
5801
5802 extern const struct ethtool_ops dpaa2_ethtool_ops;
5803 extern const char dpaa2_eth_drv_version[];
5804 +extern int dpaa2_phc_index;
5805 +
5806 +static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
5807 + u16 ver_major, u16 ver_minor)
5808 +{
5809 + if (priv->dpni_ver_major == ver_major)
5810 + return priv->dpni_ver_minor - ver_minor;
5811 + return priv->dpni_ver_major - ver_major;
5812 +}
5813 +
5814 +/* Minimum firmware version that supports a more flexible API
5815 + * for configuring the Rx flow hash key
5816 + */
5817 +#define DPNI_RX_DIST_KEY_VER_MAJOR 7
5818 +#define DPNI_RX_DIST_KEY_VER_MINOR 5
5819 +
5820 +#define dpaa2_eth_has_legacy_dist(priv) \
5821 + (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
5822 + DPNI_RX_DIST_KEY_VER_MINOR) < 0)
5823 +
5824 +#define dpaa2_eth_fs_enabled(priv) \
5825 + (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
5826 +
5827 +#define dpaa2_eth_fs_mask_enabled(priv) \
5828 + ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
5829 +
5830 +#define dpaa2_eth_fs_count(priv) \
5831 + ((priv)->dpni_attrs.fs_entries)
5832 +
5833 +#define dpaa2_eth_queue_count(priv) \
5834 + ((priv)->num_channels)
5835 +
5836 +#define dpaa2_eth_tc_count(priv) \
5837 + ((priv)->dpni_attrs.num_tcs)
5838 +
5839 +enum dpaa2_eth_rx_dist {
5840 + DPAA2_ETH_RX_DIST_HASH,
5841 + DPAA2_ETH_RX_DIST_CLS
5842 +};
5843 +
5844 +/* Unique IDs for the supported Rx classification header fields */
5845 +#define DPAA2_ETH_DIST_ETHDST BIT(0)
5846 +#define DPAA2_ETH_DIST_ETHSRC BIT(1)
5847 +#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
5848 +#define DPAA2_ETH_DIST_VLAN BIT(3)
5849 +#define DPAA2_ETH_DIST_IPSRC BIT(4)
5850 +#define DPAA2_ETH_DIST_IPDST BIT(5)
5851 +#define DPAA2_ETH_DIST_IPPROTO BIT(6)
5852 +#define DPAA2_ETH_DIST_L4SRC BIT(7)
5853 +#define DPAA2_ETH_DIST_L4DST BIT(8)
5854 +#define DPAA2_ETH_DIST_ALL (~0U)
5855 +
5856 +static inline
5857 +unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
5858 + struct sk_buff *skb)
5859 +{
5860 + unsigned int headroom = DPAA2_ETH_SWA_SIZE;
5861 +
5862 + /* If we don't have an skb (e.g. XDP buffer), we only need space for
5863 + * the software annotation area
5864 + */
5865 + if (!skb)
5866 + return headroom;
5867
5868 -static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
5869 + /* For non-linear skbs we have no headroom requirement, as we build a
5870 + * SG frame with a newly allocated SGT buffer
5871 + */
5872 + if (skb_is_nonlinear(skb))
5873 + return 0;
5874 +
5875 + /* If we have Tx timestamping, need 128B hardware annotation */
5876 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
5877 + headroom += DPAA2_ETH_TX_HWA_SIZE;
5878 +
5879 + return headroom;
5880 +}
5881 +
5882 +/* Extra headroom space requested from the hardware, to make sure there's
5883 + * no realloc'ing in forwarding scenarios
5884 + */
5885 +static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
5886 +{
5887 + return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
5888 +}
5889 +
5890 +static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
5891 + int traffic_class)
5892 +{
5893 + return priv->pfc.pfc_en & (1 << traffic_class);
5894 +}
5895 +
5896 +enum dpaa2_eth_td_cfg {
5897 + DPAA2_ETH_TD_NONE,
5898 + DPAA2_ETH_TD_QUEUE,
5899 + DPAA2_ETH_TD_GROUP
5900 +};
5901 +
5902 +static inline enum dpaa2_eth_td_cfg
5903 +dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
5904 +{
5905 + bool pfc_enabled = !!(priv->pfc.pfc_en);
5906 +
5907 + if (pfc_enabled)
5908 + return DPAA2_ETH_TD_GROUP;
5909 + else if (priv->tx_pause_frames)
5910 + return DPAA2_ETH_TD_NONE;
5911 + else
5912 + return DPAA2_ETH_TD_QUEUE;
5913 +}
5914 +
5915 +static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
5916 {
5917 - return priv->dpni_attrs.num_queues;
5918 + return 1;
5919 }
5920
5921 +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
5922 +int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
5923 +int dpaa2_eth_cls_key_size(u64 key);
5924 +int dpaa2_eth_cls_fld_off(int prot, int field);
5925 +void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
5926 +
5927 +int set_rx_taildrop(struct dpaa2_eth_priv *priv);
5928 +
5929 #endif /* __DPAA2_ETH_H */
5930 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
5931 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
5932 @@ -1,35 +1,10 @@
5933 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
5934 /* Copyright 2014-2016 Freescale Semiconductor Inc.
5935 - * Copyright 2016 NXP
5936 - *
5937 - * Redistribution and use in source and binary forms, with or without
5938 - * modification, are permitted provided that the following conditions are met:
5939 - * * Redistributions of source code must retain the above copyright
5940 - * notice, this list of conditions and the following disclaimer.
5941 - * * Redistributions in binary form must reproduce the above copyright
5942 - * notice, this list of conditions and the following disclaimer in the
5943 - * documentation and/or other materials provided with the distribution.
5944 - * * Neither the name of Freescale Semiconductor nor the
5945 - * names of its contributors may be used to endorse or promote products
5946 - * derived from this software without specific prior written permission.
5947 - *
5948 - *
5949 - * ALTERNATIVELY, this software may be distributed under the terms of the
5950 - * GNU General Public License ("GPL") as published by the Free Software
5951 - * Foundation, either version 2 of that License or (at your option) any
5952 - * later version.
5953 - *
5954 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5955 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5956 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5957 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5958 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5959 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5960 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5961 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5962 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5963 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5964 + * Copyright 2016-2017 NXP
5965 */
5966
5967 +#include <linux/net_tstamp.h>
5968 +
5969 #include "dpni.h" /* DPNI_LINK_OPT_* */
5970 #include "dpaa2-eth.h"
5971
5972 @@ -52,6 +27,10 @@ static char dpaa2_ethtool_stats[][ETH_GS
5973 "[hw] rx nobuffer discards",
5974 "[hw] tx discarded frames",
5975 "[hw] tx confirmed frames",
5976 + "[hw] tx dequeued bytes",
5977 + "[hw] tx dequeued frames",
5978 + "[hw] tx rejected bytes",
5979 + "[hw] tx rejected frames",
5980 };
5981
5982 #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
5983 @@ -62,6 +41,7 @@ static char dpaa2_ethtool_extras[][ETH_G
5984 "[drv] tx conf bytes",
5985 "[drv] tx sg frames",
5986 "[drv] tx sg bytes",
5987 + "[drv] tx realloc frames",
5988 "[drv] rx sg frames",
5989 "[drv] rx sg bytes",
5990 "[drv] enqueue portal busy",
5991 @@ -69,6 +49,12 @@ static char dpaa2_ethtool_extras[][ETH_G
5992 "[drv] dequeue portal busy",
5993 "[drv] channel pull errors",
5994 "[drv] cdan",
5995 + /* FQ stats */
5996 + "rx pending frames",
5997 + "rx pending bytes",
5998 + "tx conf pending frames",
5999 + "tx conf pending bytes",
6000 + "buffer count"
6001 };
6002
6003 #define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
6004 @@ -76,14 +62,55 @@ static char dpaa2_ethtool_extras[][ETH_G
6005 static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
6006 struct ethtool_drvinfo *drvinfo)
6007 {
6008 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6009 +
6010 strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
6011 - strlcpy(drvinfo->version, dpaa2_eth_drv_version,
6012 - sizeof(drvinfo->version));
6013 - strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
6014 +
6015 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
6016 + "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
6017 +
6018 strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
6019 sizeof(drvinfo->bus_info));
6020 }
6021
6022 +#define DPNI_LINK_AUTONEG_VER_MAJOR 7
6023 +#define DPNI_LINK_AUTONEG_VER_MINOR 8
6024 +
6025 +struct dpaa2_eth_link_mode_map {
6026 + u64 dpni_lm;
6027 + u64 ethtool_lm;
6028 +};
6029 +
6030 +static const struct dpaa2_eth_link_mode_map dpaa2_eth_lm_map[] = {
6031 + {DPNI_ADVERTISED_10BASET_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
6032 + {DPNI_ADVERTISED_100BASET_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
6033 + {DPNI_ADVERTISED_1000BASET_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
6034 + {DPNI_ADVERTISED_10000BASET_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
6035 + {DPNI_ADVERTISED_2500BASEX_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
6036 + {DPNI_ADVERTISED_AUTONEG, ETHTOOL_LINK_MODE_Autoneg_BIT},
6037 +};
6038 +
6039 +static void link_mode_dpni2ethtool(u64 dpni_lm, unsigned long *ethtool_lm)
6040 +{
6041 + int i;
6042 +
6043 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
6044 + if (dpni_lm & dpaa2_eth_lm_map[i].dpni_lm)
6045 + __set_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm);
6046 + }
6047 +}
6048 +
6049 +static void link_mode_ethtool2dpni(const unsigned long *ethtool_lm,
6050 + u64 *dpni_lm)
6051 +{
6052 + int i;
6053 +
6054 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
6055 + if (test_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm))
6056 + *dpni_lm |= dpaa2_eth_lm_map[i].dpni_lm;
6057 + }
6058 +}
6059 +
6060 static int
6061 dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
6062 struct ethtool_link_ksettings *link_settings)
6063 @@ -92,17 +119,27 @@ dpaa2_eth_get_link_ksettings(struct net_
6064 int err = 0;
6065 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6066
6067 - err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
6068 - if (err) {
6069 - netdev_err(net_dev, "ERROR %d getting link state\n", err);
6070 - goto out;
6071 + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
6072 + DPNI_LINK_AUTONEG_VER_MINOR) < 0) {
6073 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token,
6074 + &state);
6075 + if (err) {
6076 + netdev_err(net_dev, "dpni_get_link_state failed\n");
6077 + goto out;
6078 + }
6079 + } else {
6080 + err = dpni_get_link_state_v2(priv->mc_io, 0, priv->mc_token,
6081 + &state);
6082 + if (err) {
6083 + netdev_err(net_dev, "dpni_get_link_state_v2 failed\n");
6084 + goto out;
6085 + }
6086 + link_mode_dpni2ethtool(state.supported,
6087 + link_settings->link_modes.supported);
6088 + link_mode_dpni2ethtool(state.advertising,
6089 + link_settings->link_modes.advertising);
6090 }
6091
6092 - /* At the moment, we have no way of interrogating the DPMAC
6093 - * from the DPNI side - and for that matter there may exist
6094 - * no DPMAC at all. So for now we just don't report anything
6095 - * beyond the DPNI attributes.
6096 - */
6097 if (state.options & DPNI_LINK_OPT_AUTONEG)
6098 link_settings->base.autoneg = AUTONEG_ENABLE;
6099 if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
6100 @@ -113,25 +150,37 @@ out:
6101 return err;
6102 }
6103
6104 +#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
6105 +#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
6106 static int
6107 dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
6108 const struct ethtool_link_ksettings *link_settings)
6109 {
6110 - struct dpni_link_cfg cfg = {0};
6111 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6112 + struct dpni_link_state state = {0};
6113 + struct dpni_link_cfg cfg = {0};
6114 int err = 0;
6115
6116 - netdev_dbg(net_dev, "Setting link parameters...");
6117 + /* If using an older MC version, the DPNI must be down
6118 + * in order to be able to change link settings. Taking steps to let
6119 + * the user know that.
6120 + */
6121 + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
6122 + DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
6123 + if (netif_running(net_dev)) {
6124 + netdev_info(net_dev, "Interface must be brought down first.\n");
6125 + return -EACCES;
6126 + }
6127 + }
6128
6129 - /* Due to a temporary MC limitation, the DPNI must be down
6130 - * in order to be able to change link settings. Taking steps to let
6131 - * the user know that.
6132 - */
6133 - if (netif_running(net_dev)) {
6134 - netdev_info(net_dev, "Sorry, interface must be brought down first.\n");
6135 - return -EACCES;
6136 + /* Need to interrogate link state to get flow control params */
6137 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
6138 + if (err) {
6139 + netdev_err(net_dev, "Error getting link state\n");
6140 + goto out;
6141 }
6142
6143 + cfg.options = state.options;
6144 cfg.rate = link_settings->base.speed;
6145 if (link_settings->base.autoneg == AUTONEG_ENABLE)
6146 cfg.options |= DPNI_LINK_OPT_AUTONEG;
6147 @@ -142,13 +191,92 @@ dpaa2_eth_set_link_ksettings(struct net_
6148 else
6149 cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
6150
6151 + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
6152 +				   DPNI_LINK_AUTONEG_VER_MINOR) < 0) {
6153 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
6154 + } else {
6155 + link_mode_ethtool2dpni(link_settings->link_modes.advertising,
6156 + &cfg.advertising);
6157 +		err = dpni_set_link_cfg_v2(priv->mc_io, 0, priv->mc_token, &cfg);
6158 + }
6159 + if (err)
6160 +		netdev_err(net_dev, "dpni_set_link_cfg failed\n");
6161 +
6162 +out:
6163 + return err;
6164 +}
6165 +
6166 +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
6167 + struct ethtool_pauseparam *pause)
6168 +{
6169 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6170 + struct dpni_link_state state = {0};
6171 + int err;
6172 +
6173 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
6174 + if (err)
6175 + netdev_dbg(net_dev, "Error getting link state\n");
6176 +
6177 + /* Report general port autonegotiation status */
6178 + pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
6179 + pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
6180 + pause->tx_pause = pause->rx_pause ^
6181 + !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
6182 +}
6183 +
6184 +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
6185 + struct ethtool_pauseparam *pause)
6186 +{
6187 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6188 + struct dpni_link_state state = {0};
6189 + struct dpni_link_cfg cfg = {0};
6190 + u32 current_tx_pause;
6191 + int err = 0;
6192 +
6193 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
6194 + if (err) {
6195 + netdev_dbg(net_dev, "Error getting link state\n");
6196 + goto out;
6197 + }
6198 +
6199 + cfg.rate = state.rate;
6200 + cfg.options = state.options;
6201 + current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
6202 + !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
6203 +
6204 + /* We don't support changing pause frame autonegotiation separately
6205 + * from general port autoneg
6206 + */
6207 + if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
6208 + netdev_warn(net_dev,
6209 + "Cannot change pause frame autoneg separately\n");
6210 +
6211 + if (pause->rx_pause)
6212 + cfg.options |= DPNI_LINK_OPT_PAUSE;
6213 + else
6214 + cfg.options &= ~DPNI_LINK_OPT_PAUSE;
6215 +
6216 + if (pause->rx_pause ^ pause->tx_pause)
6217 + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
6218 + else
6219 + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
6220 +
6221 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
6222 + if (err) {
6223 + netdev_dbg(net_dev, "Error setting link\n");
6224 + goto out;
6225 + }
6226 +
6227 + /* Enable/disable Rx FQ taildrop if Tx pause frames have changed */
6228 + if (current_tx_pause == pause->tx_pause)
6229 + goto out;
6230 +
6231 + priv->tx_pause_frames = pause->tx_pause;
6232 + err = set_rx_taildrop(priv);
6233 if (err)
6234 - /* ethtool will be loud enough if we return an error; no point
6235 - * in putting our own error message on the console by default
6236 - */
6237 - netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
6238 + netdev_dbg(net_dev, "Error configuring taildrop\n");
6239
6240 +out:
6241 return err;
6242 }
6243
6244 @@ -192,6 +320,10 @@ static void dpaa2_eth_get_ethtool_stats(
6245 int j, k, err;
6246 int num_cnt;
6247 union dpni_statistics dpni_stats;
6248 + u32 fcnt, bcnt;
6249 + u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
6250 + u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
6251 + u32 buf_cnt;
6252 u64 cdan = 0;
6253 u64 portal_busy = 0, pull_err = 0;
6254 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6255 @@ -202,9 +334,9 @@ static void dpaa2_eth_get_ethtool_stats(
6256 sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
6257
6258 /* Print standard counters, from DPNI statistics */
6259 - for (j = 0; j <= 2; j++) {
6260 + for (j = 0; j <= 3; j++) {
6261 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
6262 - j, &dpni_stats);
6263 + j, 0, &dpni_stats);
6264 if (err != 0)
6265 netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
6266 switch (j) {
6267 @@ -217,6 +349,9 @@ static void dpaa2_eth_get_ethtool_stats(
6268 case 2:
6269 num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
6270 break;
6271 + case 3:
6272 + num_cnt = sizeof(dpni_stats.page_3) / sizeof(u64);
6273 + break;
6274 }
6275 for (k = 0; k < num_cnt; k++)
6276 *(data + i++) = dpni_stats.raw.counter[k];
6277 @@ -240,12 +375,410 @@ static void dpaa2_eth_get_ethtool_stats(
6278 *(data + i++) = portal_busy;
6279 *(data + i++) = pull_err;
6280 *(data + i++) = cdan;
6281 +
6282 + for (j = 0; j < priv->num_fqs; j++) {
6283 + /* Print FQ instantaneous counts */
6284 + err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
6285 + &fcnt, &bcnt);
6286 + if (err) {
6287 +			netdev_warn(net_dev, "FQ query error %d\n", err);
6288 + return;
6289 + }
6290 +
6291 + if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
6292 + fcnt_tx_total += fcnt;
6293 + bcnt_tx_total += bcnt;
6294 + } else {
6295 + fcnt_rx_total += fcnt;
6296 + bcnt_rx_total += bcnt;
6297 + }
6298 + }
6299 +
6300 + *(data + i++) = fcnt_rx_total;
6301 + *(data + i++) = bcnt_rx_total;
6302 + *(data + i++) = fcnt_tx_total;
6303 + *(data + i++) = bcnt_tx_total;
6304 +
6305 + err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
6306 + if (err) {
6307 + netdev_warn(net_dev, "Buffer count query error %d\n", err);
6308 + return;
6309 + }
6310 + *(data + i++) = buf_cnt;
6311 +}
6312 +
6313 +static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
6314 + void *key, void *mask, u64 *fields)
6315 +{
6316 + int off;
6317 +
6318 + if (eth_mask->h_proto) {
6319 + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
6320 + *(__be16 *)(key + off) = eth_value->h_proto;
6321 + *(__be16 *)(mask + off) = eth_mask->h_proto;
6322 + *fields |= DPAA2_ETH_DIST_ETHTYPE;
6323 + }
6324 +
6325 + if (!is_zero_ether_addr(eth_mask->h_source)) {
6326 + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
6327 + ether_addr_copy(key + off, eth_value->h_source);
6328 + ether_addr_copy(mask + off, eth_mask->h_source);
6329 + *fields |= DPAA2_ETH_DIST_ETHSRC;
6330 + }
6331 +
6332 + if (!is_zero_ether_addr(eth_mask->h_dest)) {
6333 + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
6334 + ether_addr_copy(key + off, eth_value->h_dest);
6335 + ether_addr_copy(mask + off, eth_mask->h_dest);
6336 + *fields |= DPAA2_ETH_DIST_ETHDST;
6337 + }
6338 +
6339 + return 0;
6340 +}
6341 +
6342 +static int prep_user_ip_rule(struct ethtool_usrip4_spec *uip_value,
6343 + struct ethtool_usrip4_spec *uip_mask,
6344 + void *key, void *mask, u64 *fields)
6345 +{
6346 + int off;
6347 + u32 tmp_value, tmp_mask;
6348 +
6349 + if (uip_mask->tos || uip_mask->ip_ver)
6350 + return -EOPNOTSUPP;
6351 +
6352 + if (uip_mask->ip4src) {
6353 + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
6354 + *(__be32 *)(key + off) = uip_value->ip4src;
6355 + *(__be32 *)(mask + off) = uip_mask->ip4src;
6356 + *fields |= DPAA2_ETH_DIST_IPSRC;
6357 + }
6358 +
6359 + if (uip_mask->ip4dst) {
6360 + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
6361 + *(__be32 *)(key + off) = uip_value->ip4dst;
6362 + *(__be32 *)(mask + off) = uip_mask->ip4dst;
6363 + *fields |= DPAA2_ETH_DIST_IPDST;
6364 + }
6365 +
6366 + if (uip_mask->proto) {
6367 + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
6368 + *(u8 *)(key + off) = uip_value->proto;
6369 + *(u8 *)(mask + off) = uip_mask->proto;
6370 + *fields |= DPAA2_ETH_DIST_IPPROTO;
6371 + }
6372 +
6373 + if (uip_mask->l4_4_bytes) {
6374 + tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
6375 + tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
6376 +
6377 + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
6378 + *(__be16 *)(key + off) = htons(tmp_value >> 16);
6379 + *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
6380 + *fields |= DPAA2_ETH_DIST_L4SRC;
6381 +
6382 + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
6383 + *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
6384 + *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
6385 + *fields |= DPAA2_ETH_DIST_L4DST;
6386 + }
6387 +
6388 + /* Only apply the rule for IPv4 frames */
6389 + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
6390 + *(__be16 *)(key + off) = htons(ETH_P_IP);
6391 + *(__be16 *)(mask + off) = htons(0xFFFF);
6392 + *fields |= DPAA2_ETH_DIST_ETHTYPE;
6393 +
6394 + return 0;
6395 +}
6396 +
6397 +static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
6398 + struct ethtool_tcpip4_spec *l4_mask,
6399 + void *key, void *mask, u8 l4_proto, u64 *fields)
6400 +{
6401 + int off;
6402 +
6403 + if (l4_mask->tos)
6404 + return -EOPNOTSUPP;
6405 + if (l4_mask->ip4src) {
6406 + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
6407 + *(__be32 *)(key + off) = l4_value->ip4src;
6408 + *(__be32 *)(mask + off) = l4_mask->ip4src;
6409 + *fields |= DPAA2_ETH_DIST_IPSRC;
6410 + }
6411 +
6412 + if (l4_mask->ip4dst) {
6413 + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
6414 + *(__be32 *)(key + off) = l4_value->ip4dst;
6415 + *(__be32 *)(mask + off) = l4_mask->ip4dst;
6416 + *fields |= DPAA2_ETH_DIST_IPDST;
6417 + }
6418 +
6419 + if (l4_mask->psrc) {
6420 + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
6421 + *(__be16 *)(key + off) = l4_value->psrc;
6422 + *(__be16 *)(mask + off) = l4_mask->psrc;
6423 + *fields |= DPAA2_ETH_DIST_L4SRC;
6424 + }
6425 +
6426 + if (l4_mask->pdst) {
6427 + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
6428 + *(__be16 *)(key + off) = l4_value->pdst;
6429 + *(__be16 *)(mask + off) = l4_mask->pdst;
6430 + *fields |= DPAA2_ETH_DIST_L4DST;
6431 + }
6432 +
6433 + /* Only apply the rule for the user-specified L4 protocol
6434 + * and if ethertype matches IPv4
6435 + */
6436 + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
6437 + *(__be16 *)(key + off) = htons(ETH_P_IP);
6438 + *(__be16 *)(mask + off) = htons(0xFFFF);
6439 + *fields |= DPAA2_ETH_DIST_ETHTYPE;
6440 +
6441 + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
6442 + *(u8 *)(key + off) = l4_proto;
6443 + *(u8 *)(mask + off) = 0xFF;
6444 + *fields |= DPAA2_ETH_DIST_IPPROTO;
6445 +
6446 + return 0;
6447 +}
6448 +
6449 +static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
6450 + struct ethtool_flow_ext *ext_mask,
6451 + void *key, void *mask, u64 *fields)
6452 +{
6453 + int off;
6454 +
6455 + if (ext_mask->vlan_etype)
6456 + return -EOPNOTSUPP;
6457 +
6458 + if (ext_mask->vlan_tci) {
6459 + off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
6460 + *(__be16 *)(key + off) = ext_value->vlan_tci;
6461 + *(__be16 *)(mask + off) = ext_mask->vlan_tci;
6462 + *fields |= DPAA2_ETH_DIST_VLAN;
6463 + }
6464 +
6465 + return 0;
6466 +}
6467 +
6468 +static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
6469 + struct ethtool_flow_ext *ext_mask,
6470 + void *key, void *mask, u64 *fields)
6471 +{
6472 + int off;
6473 +
6474 + if (!is_zero_ether_addr(ext_mask->h_dest)) {
6475 + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
6476 + ether_addr_copy(key + off, ext_value->h_dest);
6477 + ether_addr_copy(mask + off, ext_mask->h_dest);
6478 + *fields |= DPAA2_ETH_DIST_ETHDST;
6479 + }
6480 +
6481 + return 0;
6482 +}
6483 +
6484 +static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
6485 + u64 *fields)
6486 +{
6487 + int err;
6488 +
6489 + switch (fs->flow_type & 0xFF) {
6490 + case ETHER_FLOW:
6491 + err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
6492 + key, mask, fields);
6493 + break;
6494 + case IP_USER_FLOW:
6495 + err = prep_user_ip_rule(&fs->h_u.usr_ip4_spec,
6496 + &fs->m_u.usr_ip4_spec, key, mask, fields);
6497 + break;
6498 + case TCP_V4_FLOW:
6499 + err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
6500 + key, mask, IPPROTO_TCP, fields);
6501 + break;
6502 + case UDP_V4_FLOW:
6503 + err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
6504 + key, mask, IPPROTO_UDP, fields);
6505 + break;
6506 + case SCTP_V4_FLOW:
6507 + err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, &fs->m_u.sctp_ip4_spec,
6508 + key, mask, IPPROTO_SCTP, fields);
6509 + break;
6510 + default:
6511 + return -EOPNOTSUPP;
6512 + }
6513 +
6514 + if (err)
6515 + return err;
6516 +
6517 + if (fs->flow_type & FLOW_EXT) {
6518 + err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
6519 + if (err)
6520 + return err;
6521 + }
6522 +
6523 + if (fs->flow_type & FLOW_MAC_EXT) {
6524 + err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
6525 + fields);
6526 + if (err)
6527 + return err;
6528 + }
6529 +
6530 + return 0;
6531 +}
6532 +
6533 +static int do_cls_rule(struct net_device *net_dev,
6534 + struct ethtool_rx_flow_spec *fs,
6535 + bool add)
6536 +{
6537 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6538 + struct device *dev = net_dev->dev.parent;
6539 + struct dpni_rule_cfg rule_cfg = { 0 };
6540 + struct dpni_fs_action_cfg fs_act = { 0 };
6541 + dma_addr_t key_iova;
6542 + u64 fields = 0;
6543 + void *key_buf;
6544 + int i, err = 0;
6545 +
6546 + if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
6547 + fs->ring_cookie >= dpaa2_eth_queue_count(priv))
6548 + return -EINVAL;
6549 +
6550 + rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
6551 +
6552 + /* allocate twice the key size, for the actual key and for mask */
6553 + key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
6554 + if (!key_buf)
6555 + return -ENOMEM;
6556 +
6557 + /* Fill the key and mask memory areas */
6558 + err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
6559 + if (err)
6560 + goto free_mem;
6561 +
6562 + if (!dpaa2_eth_fs_mask_enabled(priv)) {
6563 + /* Masking allows us to configure a maximal key during init and
6564 + * use it for all flow steering rules. Without it, we include
6565 + * in the key only the fields actually used, so we need to
6566 + * extract the others from the final key buffer.
6567 + *
6568 + * Program the FS key if needed, or return error if previously
6569 + * set key can't be used for the current rule. User needs to
6570 + * delete existing rules in this case to allow for the new one.
6571 + */
6572 + if (!priv->rx_cls_fields) {
6573 + err = dpaa2_eth_set_cls(net_dev, fields);
6574 + if (err)
6575 + goto free_mem;
6576 +
6577 + priv->rx_cls_fields = fields;
6578 + } else if (priv->rx_cls_fields != fields) {
6579 + netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
6580 + err = -EOPNOTSUPP;
6581 + goto free_mem;
6582 + }
6583 +
6584 + dpaa2_eth_cls_trim_rule(key_buf, fields);
6585 + rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
6586 + }
6587 +
6588 + key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
6589 + DMA_TO_DEVICE);
6590 + if (dma_mapping_error(dev, key_iova)) {
6591 + err = -ENOMEM;
6592 + goto free_mem;
6593 + }
6594 +
6595 + rule_cfg.key_iova = key_iova;
6596 + if (dpaa2_eth_fs_mask_enabled(priv))
6597 + rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
6598 +
6599 + if (add) {
6600 + if (fs->ring_cookie == RX_CLS_FLOW_DISC)
6601 + fs_act.options |= DPNI_FS_OPT_DISCARD;
6602 + else
6603 + fs_act.flow_id = fs->ring_cookie;
6604 + }
6605 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
6606 + if (add)
6607 + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
6608 + i, fs->location, &rule_cfg,
6609 + &fs_act);
6610 + else
6611 + err = dpni_remove_fs_entry(priv->mc_io, 0,
6612 + priv->mc_token, i,
6613 + &rule_cfg);
6614 + if (err)
6615 + break;
6616 + }
6617 +
6618 + dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
6619 +
6620 +free_mem:
6621 + kfree(key_buf);
6622 +
6623 + return err;
6624 +}
6625 +
6626 +static int num_rules(struct dpaa2_eth_priv *priv)
6627 +{
6628 + int i, rules = 0;
6629 +
6630 + for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
6631 + if (priv->cls_rule[i].in_use)
6632 + rules++;
6633 +
6634 + return rules;
6635 +}
6636 +
6637 +static int update_cls_rule(struct net_device *net_dev,
6638 + struct ethtool_rx_flow_spec *new_fs,
6639 + int location)
6640 +{
6641 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6642 + struct dpaa2_eth_cls_rule *rule;
6643 + int err = -EINVAL;
6644 +
6645 + if (!priv->rx_cls_enabled)
6646 + return -EOPNOTSUPP;
6647 +
6648 + if (location >= dpaa2_eth_fs_count(priv))
6649 + return -EINVAL;
6650 +
6651 + rule = &priv->cls_rule[location];
6652 +
6653 + /* If a rule is present at the specified location, delete it. */
6654 + if (rule->in_use) {
6655 + err = do_cls_rule(net_dev, &rule->fs, false);
6656 + if (err)
6657 + return err;
6658 +
6659 + rule->in_use = 0;
6660 +
6661 + if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
6662 + priv->rx_cls_fields = 0;
6663 + }
6664 +
6665 + /* If no new entry to add, return here */
6666 + if (!new_fs)
6667 + return err;
6668 +
6669 + err = do_cls_rule(net_dev, new_fs, true);
6670 + if (err)
6671 + return err;
6672 +
6673 + rule->in_use = 1;
6674 + rule->fs = *new_fs;
6675 +
6676 + return 0;
6677 }
6678
6679 static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
6680 struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
6681 {
6682 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6683 + int rule_cnt = dpaa2_eth_fs_count(priv);
6684 + int i, j = 0;
6685
6686 switch (rxnfc->cmd) {
6687 case ETHTOOL_GRXFH:
6688 @@ -258,6 +791,29 @@ static int dpaa2_eth_get_rxnfc(struct ne
6689 case ETHTOOL_GRXRINGS:
6690 rxnfc->data = dpaa2_eth_queue_count(priv);
6691 break;
6692 + case ETHTOOL_GRXCLSRLCNT:
6693 + rxnfc->rule_cnt = 0;
6694 + rxnfc->rule_cnt = num_rules(priv);
6695 + rxnfc->data = rule_cnt;
6696 + break;
6697 + case ETHTOOL_GRXCLSRULE:
6698 + if (rxnfc->fs.location >= rule_cnt)
6699 + return -EINVAL;
6700 + if (!priv->cls_rule[rxnfc->fs.location].in_use)
6701 + return -EINVAL;
6702 + rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
6703 + break;
6704 + case ETHTOOL_GRXCLSRLALL:
6705 + for (i = 0; i < rule_cnt; i++) {
6706 + if (!priv->cls_rule[i].in_use)
6707 + continue;
6708 + if (j == rxnfc->rule_cnt)
6709 + return -EMSGSIZE;
6710 + rule_locs[j++] = i;
6711 + }
6712 + rxnfc->rule_cnt = j;
6713 + rxnfc->data = rule_cnt;
6714 + break;
6715 default:
6716 return -EOPNOTSUPP;
6717 }
6718 @@ -265,13 +821,61 @@ static int dpaa2_eth_get_rxnfc(struct ne
6719 return 0;
6720 }
6721
6722 +int dpaa2_phc_index = -1;
6723 +EXPORT_SYMBOL(dpaa2_phc_index);
6724 +
6725 +static int dpaa2_eth_get_ts_info(struct net_device *dev,
6726 + struct ethtool_ts_info *info)
6727 +{
6728 + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
6729 + SOF_TIMESTAMPING_RX_HARDWARE |
6730 + SOF_TIMESTAMPING_RAW_HARDWARE;
6731 +
6732 + info->phc_index = dpaa2_phc_index;
6733 +
6734 + info->tx_types = (1 << HWTSTAMP_TX_OFF) |
6735 + (1 << HWTSTAMP_TX_ON);
6736 +
6737 + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6738 + (1 << HWTSTAMP_FILTER_ALL);
6739 + return 0;
6740 +}
6741 +
6742 +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
6743 + struct ethtool_rxnfc *rxnfc)
6744 +{
6745 + int err = 0;
6746 +
6747 + switch (rxnfc->cmd) {
6748 + case ETHTOOL_SRXFH:
6749 + if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
6750 + return -EOPNOTSUPP;
6751 + err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
6752 + break;
6753 + case ETHTOOL_SRXCLSRLINS:
6754 + err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
6755 + break;
6756 + case ETHTOOL_SRXCLSRLDEL:
6757 + err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
6758 + break;
6759 + default:
6760 + err = -EOPNOTSUPP;
6761 + }
6762 +
6763 + return err;
6764 +}
6765 +
6766 const struct ethtool_ops dpaa2_ethtool_ops = {
6767 .get_drvinfo = dpaa2_eth_get_drvinfo,
6768 .get_link = ethtool_op_get_link,
6769 .get_link_ksettings = dpaa2_eth_get_link_ksettings,
6770 .set_link_ksettings = dpaa2_eth_set_link_ksettings,
6771 + .get_pauseparam = dpaa2_eth_get_pauseparam,
6772 + .set_pauseparam = dpaa2_eth_set_pauseparam,
6773 .get_sset_count = dpaa2_eth_get_sset_count,
6774 .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
6775 .get_strings = dpaa2_eth_get_strings,
6776 .get_rxnfc = dpaa2_eth_get_rxnfc,
6777 + .set_rxnfc = dpaa2_eth_set_rxnfc,
6778 + .get_ts_info = dpaa2_eth_get_ts_info,
6779 };
6780 --- a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
6781 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
6782 @@ -1,39 +1,10 @@
6783 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
6784 /* Copyright 2013-2015 Freescale Semiconductor Inc.
6785 - *
6786 - * Redistribution and use in source and binary forms, with or without
6787 - * modification, are permitted provided that the following conditions are met:
6788 - * * Redistributions of source code must retain the above copyright
6789 - * notice, this list of conditions and the following disclaimer.
6790 - * * Redistributions in binary form must reproduce the above copyright
6791 - * notice, this list of conditions and the following disclaimer in the
6792 - * documentation and/or other materials provided with the distribution.
6793 - * * Neither the name of the above-listed copyright holders nor the
6794 - * names of any contributors may be used to endorse or promote products
6795 - * derived from this software without specific prior written permission.
6796 - *
6797 - *
6798 - * ALTERNATIVELY, this software may be distributed under the terms of the
6799 - * GNU General Public License ("GPL") as published by the Free Software
6800 - * Foundation, either version 2 of that License or (at your option) any
6801 - * later version.
6802 - *
6803 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
6804 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
6805 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
6806 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
6807 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
6808 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
6809 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
6810 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
6811 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
6812 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6813 - * POSSIBILITY OF SUCH DAMAGE.
6814 */
6815 #ifndef __FSL_DPKG_H_
6816 #define __FSL_DPKG_H_
6817
6818 #include <linux/types.h>
6819 -#include "net.h"
6820
6821 /* Data Path Key Generator API
6822 * Contains initialization APIs and runtime APIs for the Key Generator
6823 @@ -86,6 +57,355 @@ struct dpkg_mask {
6824 u8 offset;
6825 };
6826
6827 +/* Protocol fields */
6828 +
6829 +/* Ethernet fields */
6830 +#define NH_FLD_ETH_DA BIT(0)
6831 +#define NH_FLD_ETH_SA BIT(1)
6832 +#define NH_FLD_ETH_LENGTH BIT(2)
6833 +#define NH_FLD_ETH_TYPE BIT(3)
6834 +#define NH_FLD_ETH_FINAL_CKSUM BIT(4)
6835 +#define NH_FLD_ETH_PADDING BIT(5)
6836 +#define NH_FLD_ETH_ALL_FIELDS (BIT(6) - 1)
6837 +
6838 +/* VLAN fields */
6839 +#define NH_FLD_VLAN_VPRI BIT(0)
6840 +#define NH_FLD_VLAN_CFI BIT(1)
6841 +#define NH_FLD_VLAN_VID BIT(2)
6842 +#define NH_FLD_VLAN_LENGTH BIT(3)
6843 +#define NH_FLD_VLAN_TYPE BIT(4)
6844 +#define NH_FLD_VLAN_ALL_FIELDS (BIT(5) - 1)
6845 +
6846 +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
6847 + NH_FLD_VLAN_CFI | \
6848 + NH_FLD_VLAN_VID)
6849 +
6850 +/* IP (generic) fields */
6851 +#define NH_FLD_IP_VER BIT(0)
6852 +#define NH_FLD_IP_DSCP BIT(2)
6853 +#define NH_FLD_IP_ECN BIT(3)
6854 +#define NH_FLD_IP_PROTO BIT(4)
6855 +#define NH_FLD_IP_SRC BIT(5)
6856 +#define NH_FLD_IP_DST BIT(6)
6857 +#define NH_FLD_IP_TOS_TC BIT(7)
6858 +#define NH_FLD_IP_ID BIT(8)
6859 +#define NH_FLD_IP_ALL_FIELDS (BIT(9) - 1)
6860 +
6861 +/* IPV4 fields */
6862 +#define NH_FLD_IPV4_VER BIT(0)
6863 +#define NH_FLD_IPV4_HDR_LEN BIT(1)
6864 +#define NH_FLD_IPV4_TOS BIT(2)
6865 +#define NH_FLD_IPV4_TOTAL_LEN BIT(3)
6866 +#define NH_FLD_IPV4_ID BIT(4)
6867 +#define NH_FLD_IPV4_FLAG_D BIT(5)
6868 +#define NH_FLD_IPV4_FLAG_M BIT(6)
6869 +#define NH_FLD_IPV4_OFFSET BIT(7)
6870 +#define NH_FLD_IPV4_TTL BIT(8)
6871 +#define NH_FLD_IPV4_PROTO BIT(9)
6872 +#define NH_FLD_IPV4_CKSUM BIT(10)
6873 +#define NH_FLD_IPV4_SRC_IP BIT(11)
6874 +#define NH_FLD_IPV4_DST_IP BIT(12)
6875 +#define NH_FLD_IPV4_OPTS BIT(13)
6876 +#define NH_FLD_IPV4_OPTS_COUNT BIT(14)
6877 +#define NH_FLD_IPV4_ALL_FIELDS (BIT(15) - 1)
6878 +
6879 +/* IPV6 fields */
6880 +#define NH_FLD_IPV6_VER BIT(0)
6881 +#define NH_FLD_IPV6_TC BIT(1)
6882 +#define NH_FLD_IPV6_SRC_IP BIT(2)
6883 +#define NH_FLD_IPV6_DST_IP BIT(3)
6884 +#define NH_FLD_IPV6_NEXT_HDR BIT(4)
6885 +#define NH_FLD_IPV6_FL BIT(5)
6886 +#define NH_FLD_IPV6_HOP_LIMIT BIT(6)
6887 +#define NH_FLD_IPV6_ID BIT(7)
6888 +#define NH_FLD_IPV6_ALL_FIELDS (BIT(8) - 1)
6889 +
6890 +/* ICMP fields */
6891 +#define NH_FLD_ICMP_TYPE BIT(0)
6892 +#define NH_FLD_ICMP_CODE BIT(1)
6893 +#define NH_FLD_ICMP_CKSUM BIT(2)
6894 +#define NH_FLD_ICMP_ID BIT(3)
6895 +#define NH_FLD_ICMP_SQ_NUM BIT(4)
6896 +#define NH_FLD_ICMP_ALL_FIELDS (BIT(5) - 1)
6897 +
6898 +/* IGMP fields */
6899 +#define NH_FLD_IGMP_VERSION BIT(0)
6900 +#define NH_FLD_IGMP_TYPE BIT(1)
6901 +#define NH_FLD_IGMP_CKSUM BIT(2)
6902 +#define NH_FLD_IGMP_DATA BIT(3)
6903 +#define NH_FLD_IGMP_ALL_FIELDS (BIT(4) - 1)
6904 +
6905 +/* TCP fields */
6906 +#define NH_FLD_TCP_PORT_SRC BIT(0)
6907 +#define NH_FLD_TCP_PORT_DST BIT(1)
6908 +#define NH_FLD_TCP_SEQ BIT(2)
6909 +#define NH_FLD_TCP_ACK BIT(3)
6910 +#define NH_FLD_TCP_OFFSET BIT(4)
6911 +#define NH_FLD_TCP_FLAGS BIT(5)
6912 +#define NH_FLD_TCP_WINDOW BIT(6)
6913 +#define NH_FLD_TCP_CKSUM BIT(7)
6914 +#define NH_FLD_TCP_URGPTR BIT(8)
6915 +#define NH_FLD_TCP_OPTS BIT(9)
6916 +#define NH_FLD_TCP_OPTS_COUNT BIT(10)
6917 +#define NH_FLD_TCP_ALL_FIELDS (BIT(11) - 1)
6918 +
6919 +/* UDP fields */
6920 +#define NH_FLD_UDP_PORT_SRC BIT(0)
6921 +#define NH_FLD_UDP_PORT_DST BIT(1)
6922 +#define NH_FLD_UDP_LEN BIT(2)
6923 +#define NH_FLD_UDP_CKSUM BIT(3)
6924 +#define NH_FLD_UDP_ALL_FIELDS (BIT(4) - 1)
6925 +
6926 +/* UDP-lite fields */
6927 +#define NH_FLD_UDP_LITE_PORT_SRC BIT(0)
6928 +#define NH_FLD_UDP_LITE_PORT_DST BIT(1)
6929 +#define NH_FLD_UDP_LITE_ALL_FIELDS (BIT(2) - 1)
6930 +
6931 +/* UDP-encap-ESP fields */
6932 +#define NH_FLD_UDP_ENC_ESP_PORT_SRC BIT(0)
6933 +#define NH_FLD_UDP_ENC_ESP_PORT_DST BIT(1)
6934 +#define NH_FLD_UDP_ENC_ESP_LEN BIT(2)
6935 +#define NH_FLD_UDP_ENC_ESP_CKSUM BIT(3)
6936 +#define NH_FLD_UDP_ENC_ESP_SPI BIT(4)
6937 +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM BIT(5)
6938 +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS (BIT(6) - 1)
6939 +
6940 +/* SCTP fields */
6941 +#define NH_FLD_SCTP_PORT_SRC BIT(0)
6942 +#define NH_FLD_SCTP_PORT_DST BIT(1)
6943 +#define NH_FLD_SCTP_VER_TAG BIT(2)
6944 +#define NH_FLD_SCTP_CKSUM BIT(3)
6945 +#define NH_FLD_SCTP_ALL_FIELDS (BIT(4) - 1)
6946 +
6947 +/* DCCP fields */
6948 +#define NH_FLD_DCCP_PORT_SRC BIT(0)
6949 +#define NH_FLD_DCCP_PORT_DST BIT(1)
6950 +#define NH_FLD_DCCP_ALL_FIELDS (BIT(2) - 1)
6951 +
6952 +/* IPHC fields */
6953 +#define NH_FLD_IPHC_CID BIT(0)
6954 +#define NH_FLD_IPHC_CID_TYPE BIT(1)
6955 +#define NH_FLD_IPHC_HCINDEX BIT(2)
6956 +#define NH_FLD_IPHC_GEN BIT(3)
6957 +#define NH_FLD_IPHC_D_BIT BIT(4)
6958 +#define NH_FLD_IPHC_ALL_FIELDS (BIT(5) - 1)
6959 +
6960 +/* SCTP chunk data fields */
6961 +#define NH_FLD_SCTP_CHUNK_DATA_TYPE BIT(0)
6962 +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS BIT(1)
6963 +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH BIT(2)
6964 +#define NH_FLD_SCTP_CHUNK_DATA_TSN BIT(3)
6965 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID BIT(4)
6966 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN BIT(5)
6967 +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID BIT(6)
6968 +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED BIT(7)
6969 +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING BIT(8)
6970 +#define NH_FLD_SCTP_CHUNK_DATA_END BIT(9)
6971 +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS (BIT(10) - 1)
6972 +
6973 +/* L2TPV2 fields */
6974 +#define NH_FLD_L2TPV2_TYPE_BIT BIT(0)
6975 +#define NH_FLD_L2TPV2_LENGTH_BIT BIT(1)
6976 +#define NH_FLD_L2TPV2_SEQUENCE_BIT BIT(2)
6977 +#define NH_FLD_L2TPV2_OFFSET_BIT BIT(3)
6978 +#define NH_FLD_L2TPV2_PRIORITY_BIT BIT(4)
6979 +#define NH_FLD_L2TPV2_VERSION BIT(5)
6980 +#define NH_FLD_L2TPV2_LEN BIT(6)
6981 +#define NH_FLD_L2TPV2_TUNNEL_ID BIT(7)
6982 +#define NH_FLD_L2TPV2_SESSION_ID BIT(8)
6983 +#define NH_FLD_L2TPV2_NS BIT(9)
6984 +#define NH_FLD_L2TPV2_NR BIT(10)
6985 +#define NH_FLD_L2TPV2_OFFSET_SIZE BIT(11)
6986 +#define NH_FLD_L2TPV2_FIRST_BYTE BIT(12)
6987 +#define NH_FLD_L2TPV2_ALL_FIELDS (BIT(13) - 1)
6988 +
6989 +/* L2TPV3 fields */
6990 +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT BIT(0)
6991 +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT BIT(1)
6992 +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT BIT(2)
6993 +#define NH_FLD_L2TPV3_CTRL_VERSION BIT(3)
6994 +#define NH_FLD_L2TPV3_CTRL_LENGTH BIT(4)
6995 +#define NH_FLD_L2TPV3_CTRL_CONTROL BIT(5)
6996 +#define NH_FLD_L2TPV3_CTRL_SENT BIT(6)
6997 +#define NH_FLD_L2TPV3_CTRL_RECV BIT(7)
6998 +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE BIT(8)
6999 +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS (BIT(9) - 1)
7000 +
7001 +#define NH_FLD_L2TPV3_SESS_TYPE_BIT BIT(0)
7002 +#define NH_FLD_L2TPV3_SESS_VERSION BIT(1)
7003 +#define NH_FLD_L2TPV3_SESS_ID BIT(2)
7004 +#define NH_FLD_L2TPV3_SESS_COOKIE BIT(3)
7005 +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS (BIT(4) - 1)
7006 +
7007 +/* PPP fields */
7008 +#define NH_FLD_PPP_PID BIT(0)
7009 +#define NH_FLD_PPP_COMPRESSED BIT(1)
7010 +#define NH_FLD_PPP_ALL_FIELDS (BIT(2) - 1)
7011 +
7012 +/* PPPoE fields */
7013 +#define NH_FLD_PPPOE_VER BIT(0)
7014 +#define NH_FLD_PPPOE_TYPE BIT(1)
7015 +#define NH_FLD_PPPOE_CODE BIT(2)
7016 +#define NH_FLD_PPPOE_SID BIT(3)
7017 +#define NH_FLD_PPPOE_LEN BIT(4)
7018 +#define NH_FLD_PPPOE_SESSION BIT(5)
7019 +#define NH_FLD_PPPOE_PID BIT(6)
7020 +#define NH_FLD_PPPOE_ALL_FIELDS (BIT(7) - 1)
7021 +
7022 +/* PPP-Mux fields */
7023 +#define NH_FLD_PPPMUX_PID BIT(0)
7024 +#define NH_FLD_PPPMUX_CKSUM BIT(1)
7025 +#define NH_FLD_PPPMUX_COMPRESSED BIT(2)
7026 +#define NH_FLD_PPPMUX_ALL_FIELDS (BIT(3) - 1)
7027 +
7028 +/* PPP-Mux sub-frame fields */
7029 +#define NH_FLD_PPPMUX_SUBFRM_PFF BIT(0)
7030 +#define NH_FLD_PPPMUX_SUBFRM_LXT BIT(1)
7031 +#define NH_FLD_PPPMUX_SUBFRM_LEN BIT(2)
7032 +#define NH_FLD_PPPMUX_SUBFRM_PID BIT(3)
7033 +#define NH_FLD_PPPMUX_SUBFRM_USE_PID BIT(4)
7034 +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS (BIT(5) - 1)
7035 +
7036 +/* LLC fields */
7037 +#define NH_FLD_LLC_DSAP BIT(0)
7038 +#define NH_FLD_LLC_SSAP BIT(1)
7039 +#define NH_FLD_LLC_CTRL BIT(2)
7040 +#define NH_FLD_LLC_ALL_FIELDS (BIT(3) - 1)
7041 +
7042 +/* NLPID fields */
7043 +#define NH_FLD_NLPID_NLPID BIT(0)
7044 +#define NH_FLD_NLPID_ALL_FIELDS (BIT(1) - 1)
7045 +
7046 +/* SNAP fields */
7047 +#define NH_FLD_SNAP_OUI BIT(0)
7048 +#define NH_FLD_SNAP_PID BIT(1)
7049 +#define NH_FLD_SNAP_ALL_FIELDS (BIT(2) - 1)
7050 +
7051 +/* LLC SNAP fields */
7052 +#define NH_FLD_LLC_SNAP_TYPE BIT(0)
7053 +#define NH_FLD_LLC_SNAP_ALL_FIELDS (BIT(1) - 1)
7054 +
7055 +/* ARP fields */
7056 +#define NH_FLD_ARP_HTYPE BIT(0)
7057 +#define NH_FLD_ARP_PTYPE BIT(1)
7058 +#define NH_FLD_ARP_HLEN BIT(2)
7059 +#define NH_FLD_ARP_PLEN BIT(3)
7060 +#define NH_FLD_ARP_OPER BIT(4)
7061 +#define NH_FLD_ARP_SHA BIT(5)
7062 +#define NH_FLD_ARP_SPA BIT(6)
7063 +#define NH_FLD_ARP_THA BIT(7)
7064 +#define NH_FLD_ARP_TPA BIT(8)
7065 +#define NH_FLD_ARP_ALL_FIELDS (BIT(9) - 1)
7066 +
7067 +/* RFC2684 fields */
7068 +#define NH_FLD_RFC2684_LLC BIT(0)
7069 +#define NH_FLD_RFC2684_NLPID BIT(1)
7070 +#define NH_FLD_RFC2684_OUI BIT(2)
7071 +#define NH_FLD_RFC2684_PID BIT(3)
7072 +#define NH_FLD_RFC2684_VPN_OUI BIT(4)
7073 +#define NH_FLD_RFC2684_VPN_IDX BIT(5)
7074 +#define NH_FLD_RFC2684_ALL_FIELDS (BIT(6) - 1)
7075 +
7076 +/* User defined fields */
7077 +#define NH_FLD_USER_DEFINED_SRCPORT BIT(0)
7078 +#define NH_FLD_USER_DEFINED_PCDID BIT(1)
7079 +#define NH_FLD_USER_DEFINED_ALL_FIELDS (BIT(2) - 1)
7080 +
7081 +/* Payload fields */
7082 +#define NH_FLD_PAYLOAD_BUFFER BIT(0)
7083 +#define NH_FLD_PAYLOAD_SIZE BIT(1)
7084 +#define NH_FLD_MAX_FRM_SIZE BIT(2)
7085 +#define NH_FLD_MIN_FRM_SIZE BIT(3)
7086 +#define NH_FLD_PAYLOAD_TYPE BIT(4)
7087 +#define NH_FLD_FRAME_SIZE BIT(5)
7088 +#define NH_FLD_PAYLOAD_ALL_FIELDS (BIT(6) - 1)
7089 +
7090 +/* GRE fields */
7091 +#define NH_FLD_GRE_TYPE BIT(0)
7092 +#define NH_FLD_GRE_ALL_FIELDS (BIT(1) - 1)
7093 +
7094 +/* MINENCAP fields */
7095 +#define NH_FLD_MINENCAP_SRC_IP BIT(0)
7096 +#define NH_FLD_MINENCAP_DST_IP BIT(1)
7097 +#define NH_FLD_MINENCAP_TYPE BIT(2)
7098 +#define NH_FLD_MINENCAP_ALL_FIELDS (BIT(3) - 1)
7099 +
7100 +/* IPSEC AH fields */
7101 +#define NH_FLD_IPSEC_AH_SPI BIT(0)
7102 +#define NH_FLD_IPSEC_AH_NH BIT(1)
7103 +#define NH_FLD_IPSEC_AH_ALL_FIELDS (BIT(2) - 1)
7104 +
7105 +/* IPSEC ESP fields */
7106 +#define NH_FLD_IPSEC_ESP_SPI BIT(0)
7107 +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM BIT(1)
7108 +#define NH_FLD_IPSEC_ESP_ALL_FIELDS (BIT(2) - 1)
7109 +
7110 +/* MPLS fields */
7111 +#define NH_FLD_MPLS_LABEL_STACK BIT(0)
7112 +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS (BIT(1) - 1)
7113 +
7114 +/* MACSEC fields */
7115 +#define NH_FLD_MACSEC_SECTAG BIT(0)
7116 +#define NH_FLD_MACSEC_ALL_FIELDS (BIT(1) - 1)
7117 +
7118 +/* GTP fields */
7119 +#define NH_FLD_GTP_TEID BIT(0)
7120 +
7121 +/* Supported protocols */
7122 +enum net_prot {
7123 + NET_PROT_NONE = 0,
7124 + NET_PROT_PAYLOAD,
7125 + NET_PROT_ETH,
7126 + NET_PROT_VLAN,
7127 + NET_PROT_IPV4,
7128 + NET_PROT_IPV6,
7129 + NET_PROT_IP,
7130 + NET_PROT_TCP,
7131 + NET_PROT_UDP,
7132 + NET_PROT_UDP_LITE,
7133 + NET_PROT_IPHC,
7134 + NET_PROT_SCTP,
7135 + NET_PROT_SCTP_CHUNK_DATA,
7136 + NET_PROT_PPPOE,
7137 + NET_PROT_PPP,
7138 + NET_PROT_PPPMUX,
7139 + NET_PROT_PPPMUX_SUBFRM,
7140 + NET_PROT_L2TPV2,
7141 + NET_PROT_L2TPV3_CTRL,
7142 + NET_PROT_L2TPV3_SESS,
7143 + NET_PROT_LLC,
7144 + NET_PROT_LLC_SNAP,
7145 + NET_PROT_NLPID,
7146 + NET_PROT_SNAP,
7147 + NET_PROT_MPLS,
7148 + NET_PROT_IPSEC_AH,
7149 + NET_PROT_IPSEC_ESP,
7150 + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
7151 + NET_PROT_MACSEC,
7152 + NET_PROT_GRE,
7153 + NET_PROT_MINENCAP,
7154 + NET_PROT_DCCP,
7155 + NET_PROT_ICMP,
7156 + NET_PROT_IGMP,
7157 + NET_PROT_ARP,
7158 + NET_PROT_CAPWAP_DATA,
7159 + NET_PROT_CAPWAP_CTRL,
7160 + NET_PROT_RFC2684,
7161 + NET_PROT_ICMPV6,
7162 + NET_PROT_FCOE,
7163 + NET_PROT_FIP,
7164 + NET_PROT_ISCSI,
7165 + NET_PROT_GTP,
7166 + NET_PROT_USER_DEFINED_L2,
7167 + NET_PROT_USER_DEFINED_L3,
7168 + NET_PROT_USER_DEFINED_L4,
7169 + NET_PROT_USER_DEFINED_L5,
7170 + NET_PROT_USER_DEFINED_SHIM1,
7171 + NET_PROT_USER_DEFINED_SHIM2,
7172 +
7173 + NET_PROT_DUMMY_LAST
7174 +};
7175 +
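The NH_FLD_* masks and enum net_prot above are consumed through the dpkg extraction structures when composing a classification key. A minimal sketch, assuming the struct dpkg_profile_cfg / struct dpkg_extract layout and the DPKG_EXTRACT_FROM_HDR / DPKG_FULL_FIELD values defined elsewhere in this header:

	/* Sketch: extract IP source, destination and protocol as key fields */
	static void build_l3_key(struct dpkg_profile_cfg *kg)
	{
		memset(kg, 0, sizeof(*kg));

		kg->extracts[0].type = DPKG_EXTRACT_FROM_HDR;
		kg->extracts[0].extract.from_hdr.prot = NET_PROT_IP;
		kg->extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg->extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;

		kg->extracts[1].type = DPKG_EXTRACT_FROM_HDR;
		kg->extracts[1].extract.from_hdr.prot = NET_PROT_IP;
		kg->extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg->extracts[1].extract.from_hdr.field = NH_FLD_IP_DST;

		kg->extracts[2].type = DPKG_EXTRACT_FROM_HDR;
		kg->extracts[2].extract.from_hdr.prot = NET_PROT_IP;
		kg->extracts[2].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg->extracts[2].extract.from_hdr.field = NH_FLD_IP_PROTO;

		kg->num_extracts = 3;
	}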
7176 /**
7177 * struct dpkg_extract - A structure for defining a single extraction
7178 * @type: Determines how the union below is interpreted:
7179 --- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
7180 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
7181 @@ -1,34 +1,6 @@
7182 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
7183 /* Copyright 2013-2016 Freescale Semiconductor Inc.
7184 * Copyright 2016 NXP
7185 - *
7186 - * Redistribution and use in source and binary forms, with or without
7187 - * modification, are permitted provided that the following conditions are met:
7188 - * * Redistributions of source code must retain the above copyright
7189 - * notice, this list of conditions and the following disclaimer.
7190 - * * Redistributions in binary form must reproduce the above copyright
7191 - * notice, this list of conditions and the following disclaimer in the
7192 - * documentation and/or other materials provided with the distribution.
7193 - * * Neither the name of the above-listed copyright holders nor the
7194 - * names of any contributors may be used to endorse or promote products
7195 - * derived from this software without specific prior written permission.
7196 - *
7197 - *
7198 - * ALTERNATIVELY, this software may be distributed under the terms of the
7199 - * GNU General Public License ("GPL") as published by the Free Software
7200 - * Foundation, either version 2 of that License or (at your option) any
7201 - * later version.
7202 - *
7203 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
7204 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
7205 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
7206 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
7207 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
7208 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
7209 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
7210 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
7211 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
7212 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
7213 - * POSSIBILITY OF SUCH DAMAGE.
7214 */
7215 #ifndef _FSL_DPNI_CMD_H
7216 #define _FSL_DPNI_CMD_H
7217 @@ -39,9 +11,11 @@
7218 #define DPNI_VER_MAJOR 7
7219 #define DPNI_VER_MINOR 0
7220 #define DPNI_CMD_BASE_VERSION 1
7221 +#define DPNI_CMD_2ND_VERSION 2
7222 #define DPNI_CMD_ID_OFFSET 4
7223
7224 #define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
7225 +#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
7226
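For reference, the versioned wrapper above only changes the low nibble of the encoded command, so both versions of a command share the same command id:

	/* DPNI_CMD(id)    = (id << 4) | 1   base version
	 * DPNI_CMD_V2(id) = (id << 4) | 2   second version, same id
	 *
	 * e.g. DPNI_CMDID_GET_LINK_STATE    = (0x215 << 4) | 1 = 0x2151
	 *      DPNI_CMDID_GET_LINK_STATE_V2 = (0x215 << 4) | 2 = 0x2152
	 */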
7227 #define DPNI_CMDID_OPEN DPNI_CMD(0x801)
7228 #define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
7229 @@ -64,16 +38,18 @@
7230 #define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
7231 #define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
7232
7233 -#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
7234 +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
7235 #define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
7236
7237 #define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
7238 #define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
7239 #define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
7240 +#define DPNI_CMDID_GET_LINK_STATE_V2 DPNI_CMD_V2(0x215)
7241 #define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
7242 #define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
7243 #define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
7244 -#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
7245 +#define DPNI_CMDID_SET_LINK_CFG_V2 DPNI_CMD_V2(0x21A)
7246 +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
7247
7248 #define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
7249 #define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
7250 @@ -87,11 +63,16 @@
7251
7252 #define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
7253
7254 +#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
7255 +#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
7256 +#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
7257 #define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
7258 #define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
7259 #define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
7260
7261 -#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
7262 +#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
7263 +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
7264 +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
7265 #define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
7266 #define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
7267 #define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
7268 @@ -110,6 +91,9 @@
7269 #define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
7270 #define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
7271
7272 +#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
7273 +#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
7274 +
7275 /* Macros for accessing command fields smaller than 1byte */
7276 #define DPNI_MASK(field) \
7277 GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
7278 @@ -126,13 +110,14 @@ struct dpni_cmd_open {
7279
7280 #define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
7281 struct dpni_cmd_set_pools {
7282 - /* cmd word 0 */
7283 u8 num_dpbp;
7284 u8 backup_pool_mask;
7285 __le16 pad;
7286 - /* cmd word 0..4 */
7287 - __le32 dpbp_id[DPNI_MAX_DPBP];
7288 - /* cmd word 4..6 */
7289 + struct {
7290 + __le16 dpbp_id;
7291 + u8 priority_mask;
7292 + u8 pad;
7293 + } pool[DPNI_MAX_DPBP];
7294 __le16 buffer_size[DPNI_MAX_DPBP];
7295 };
7296
7297 @@ -303,6 +288,7 @@ struct dpni_rsp_get_tx_data_offset {
7298
7299 struct dpni_cmd_get_statistics {
7300 u8 page_number;
7301 + u8 param;
7302 };
7303
7304 struct dpni_rsp_get_statistics {
7305 @@ -319,8 +305,22 @@ struct dpni_cmd_set_link_cfg {
7306 __le64 options;
7307 };
7308
7309 +struct dpni_cmd_set_link_cfg_v2 {
7310 + /* cmd word 0 */
7311 + __le64 pad0;
7312 + /* cmd word 1 */
7313 + __le32 rate;
7314 + __le32 pad1;
7315 + /* cmd word 2 */
7316 + __le64 options;
7317 + /* cmd word 3 */
7318 + __le64 advertising;
7319 +};
7320 +
7321 #define DPNI_LINK_STATE_SHIFT 0
7322 #define DPNI_LINK_STATE_SIZE 1
7323 +#define DPNI_STATE_VALID_SHIFT 1
7324 +#define DPNI_STATE_VALID_SIZE 1
7325
7326 struct dpni_rsp_get_link_state {
7327 /* response word 0 */
7328 @@ -335,6 +335,39 @@ struct dpni_rsp_get_link_state {
7329 __le64 options;
7330 };
7331
7332 +struct dpni_rsp_get_link_state_v2 {
7333 + /* response word 0 */
7334 + __le32 pad0;
7335 + /* from LSB: up:1, valid:1 */
7336 + u8 flags;
7337 + u8 pad1[3];
7338 + /* response word 1 */
7339 + __le32 rate;
7340 + __le32 pad2;
7341 + /* response word 2 */
7342 + __le64 options;
7343 +	/* response word 3 */
7344 +	__le64 supported;
7345 +	/* response word 4 */
7346 + __le64 advertising;
7347 +};
7348 +
7349 +#define DPNI_COUPLED_SHIFT 0
7350 +#define DPNI_COUPLED_SIZE 1
7351 +
7352 +struct dpni_cmd_set_tx_shaping {
7353 + /* cmd word 0 */
7354 + __le16 tx_cr_max_burst_size;
7355 + __le16 tx_er_max_burst_size;
7356 + __le32 pad;
7357 + /* cmd word 1 */
7358 + __le32 tx_cr_rate_limit;
7359 + __le32 tx_er_rate_limit;
7360 + /* cmd word 2 */
7361 + /* from LSB: coupled:1 */
7362 + u8 coupled;
7363 +};
7364 +
7365 struct dpni_cmd_set_max_frame_length {
7366 __le16 max_frame_length;
7367 };
7368 @@ -394,6 +427,24 @@ struct dpni_cmd_clear_mac_filters {
7369 u8 flags;
7370 };
7371
7372 +#define DPNI_SEPARATE_GRP_SHIFT 0
7373 +#define DPNI_SEPARATE_GRP_SIZE 1
7374 +#define DPNI_MODE_1_SHIFT 0
7375 +#define DPNI_MODE_1_SIZE 4
7376 +#define DPNI_MODE_2_SHIFT 4
7377 +#define DPNI_MODE_2_SIZE 4
7378 +
7379 +struct dpni_cmd_set_tx_priorities {
7380 + __le16 flags;
7381 + u8 prio_group_A;
7382 + u8 prio_group_B;
7383 + __le32 pad0;
7384 + u8 modes[4];
7385 + __le32 pad1;
7386 + __le64 pad2;
7387 + __le16 delta_bandwidth[8];
7388 +};
7389 +
7390 #define DPNI_DIST_MODE_SHIFT 0
7391 #define DPNI_DIST_MODE_SIZE 4
7392 #define DPNI_MISS_ACTION_SHIFT 4
7393 @@ -503,6 +554,63 @@ struct dpni_cmd_set_queue {
7394 __le64 user_context;
7395 };
7396
7397 +#define DPNI_DISCARD_ON_MISS_SHIFT 0
7398 +#define DPNI_DISCARD_ON_MISS_SIZE 1
7399 +
7400 +struct dpni_cmd_set_qos_table {
7401 + __le32 pad;
7402 + u8 default_tc;
7403 + /* only the LSB */
7404 + u8 discard_on_miss;
7405 + __le16 pad1[21];
7406 + __le64 key_cfg_iova;
7407 +};
7408 +
7409 +struct dpni_cmd_add_qos_entry {
7410 + __le16 pad;
7411 + u8 tc_id;
7412 + u8 key_size;
7413 + __le16 index;
7414 + __le16 pad2;
7415 + __le64 key_iova;
7416 + __le64 mask_iova;
7417 +};
7418 +
7419 +struct dpni_cmd_remove_qos_entry {
7420 + u8 pad1[3];
7421 + u8 key_size;
7422 + __le32 pad2;
7423 + __le64 key_iova;
7424 + __le64 mask_iova;
7425 +};
7426 +
7427 +struct dpni_cmd_add_fs_entry {
7428 + /* cmd word 0 */
7429 + __le16 options;
7430 + u8 tc_id;
7431 + u8 key_size;
7432 + __le16 index;
7433 + __le16 flow_id;
7434 + /* cmd word 1 */
7435 + __le64 key_iova;
7436 + /* cmd word 2 */
7437 + __le64 mask_iova;
7438 + /* cmd word 3 */
7439 + __le64 flc;
7440 +};
7441 +
7442 +struct dpni_cmd_remove_fs_entry {
7443 + /* cmd word 0 */
7444 + __le16 pad0;
7445 + u8 tc_id;
7446 + u8 key_size;
7447 + __le32 pad1;
7448 + /* cmd word 1 */
7449 + __le64 key_iova;
7450 + /* cmd word 2 */
7451 + __le64 mask_iova;
7452 +};
7453 +
7454 struct dpni_cmd_set_taildrop {
7455 /* cmd word 0 */
7456 u8 congestion_point;
7457 @@ -538,4 +646,79 @@ struct dpni_rsp_get_taildrop {
7458 __le32 threshold;
7459 };
7460
7461 +struct dpni_rsp_get_api_version {
7462 +	__le16 major;
7463 +	__le16 minor;
7464 +};
7465 +
7466 +#define DPNI_DEST_TYPE_SHIFT 0
7467 +#define DPNI_DEST_TYPE_SIZE 4
7468 +#define DPNI_CONG_UNITS_SHIFT 4
7469 +#define DPNI_CONG_UNITS_SIZE 2
7470 +
7471 +struct dpni_cmd_set_congestion_notification {
7472 + /* cmd word 0 */
7473 + u8 qtype;
7474 + u8 tc;
7475 + u8 pad[6];
7476 + /* cmd word 1 */
7477 + __le32 dest_id;
7478 + __le16 notification_mode;
7479 + u8 dest_priority;
7480 + /* from LSB: dest_type: 4 units:2 */
7481 + u8 type_units;
7482 + /* cmd word 2 */
7483 + __le64 message_iova;
7484 + /* cmd word 3 */
7485 + __le64 message_ctx;
7486 + /* cmd word 4 */
7487 + __le32 threshold_entry;
7488 + __le32 threshold_exit;
7489 +};
7490 +
7491 +struct dpni_cmd_get_congestion_notification {
7492 + /* cmd word 0 */
7493 + u8 qtype;
7494 + u8 tc;
7495 +};
7496 +
7497 +struct dpni_rsp_get_congestion_notification {
7498 + /* cmd word 0 */
7499 + __le64 pad;
7500 + /* cmd word 1 */
7501 + __le32 dest_id;
7502 + __le16 notification_mode;
7503 + u8 dest_priority;
7504 + /* from LSB: dest_type: 4 units:2 */
7505 + u8 type_units;
7506 + /* cmd word 2 */
7507 + __le64 message_iova;
7508 + /* cmd word 3 */
7509 + __le64 message_ctx;
7510 + /* cmd word 4 */
7511 + __le32 threshold_entry;
7512 + __le32 threshold_exit;
7513 +};
7514 +
7515 +#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
7516 +#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
7517 +struct dpni_cmd_set_rx_fs_dist {
7518 + __le16 dist_size;
7519 + u8 enable;
7520 + u8 tc;
7521 + __le16 miss_flow_id;
7522 + __le16 pad;
7523 + __le64 key_cfg_iova;
7524 +};
7525 +
7526 +#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
7527 +#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
7528 +struct dpni_cmd_set_rx_hash_dist {
7529 + __le16 dist_size;
7530 + u8 enable;
7531 + u8 tc;
7532 + __le32 pad;
7533 + __le64 key_cfg_iova;
7534 +};
7535 +
7536 #endif /* _FSL_DPNI_CMD_H */
7537 --- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
7538 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
7539 @@ -1,34 +1,6 @@
7540 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
7541 /* Copyright 2013-2016 Freescale Semiconductor Inc.
7542 * Copyright 2016 NXP
7543 - *
7544 - * Redistribution and use in source and binary forms, with or without
7545 - * modification, are permitted provided that the following conditions are met:
7546 - * * Redistributions of source code must retain the above copyright
7547 - * notice, this list of conditions and the following disclaimer.
7548 - * * Redistributions in binary form must reproduce the above copyright
7549 - * notice, this list of conditions and the following disclaimer in the
7550 - * documentation and/or other materials provided with the distribution.
7551 - * * Neither the name of the above-listed copyright holders nor the
7552 - * names of any contributors may be used to endorse or promote products
7553 - * derived from this software without specific prior written permission.
7554 - *
7555 - *
7556 - * ALTERNATIVELY, this software may be distributed under the terms of the
7557 - * GNU General Public License ("GPL") as published by the Free Software
7558 - * Foundation, either version 2 of that License or (at your option) any
7559 - * later version.
7560 - *
7561 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
7562 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
7563 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
7564 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
7565 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
7566 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
7567 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
7568 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
7569 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
7570 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
7571 - * POSSIBILITY OF SUCH DAMAGE.
7572 */
7573 #include <linux/kernel.h>
7574 #include <linux/errno.h>
7575 @@ -122,7 +94,7 @@ int dpni_open(struct fsl_mc_io *mc_io,
7576 int dpni_id,
7577 u16 *token)
7578 {
7579 - struct mc_command cmd = { 0 };
7580 + struct fsl_mc_command cmd = { 0 };
7581 struct dpni_cmd_open *cmd_params;
7582
7583 int err;
7584 @@ -160,7 +132,7 @@ int dpni_close(struct fsl_mc_io *mc_io,
7585 u32 cmd_flags,
7586 u16 token)
7587 {
7588 - struct mc_command cmd = { 0 };
7589 + struct fsl_mc_command cmd = { 0 };
7590
7591 /* prepare command */
7592 cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
7593 @@ -188,7 +160,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_
7594 u16 token,
7595 const struct dpni_pools_cfg *cfg)
7596 {
7597 - struct mc_command cmd = { 0 };
7598 + struct fsl_mc_command cmd = { 0 };
7599 struct dpni_cmd_set_pools *cmd_params;
7600 int i;
7601
7602 @@ -199,7 +171,10 @@ int dpni_set_pools(struct fsl_mc_io *mc_
7603 cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
7604 cmd_params->num_dpbp = cfg->num_dpbp;
7605 for (i = 0; i < DPNI_MAX_DPBP; i++) {
7606 - cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
7607 + cmd_params->pool[i].dpbp_id =
7608 + cpu_to_le16(cfg->pools[i].dpbp_id);
7609 + cmd_params->pool[i].priority_mask =
7610 + cfg->pools[i].priority_mask;
7611 cmd_params->buffer_size[i] =
7612 cpu_to_le16(cfg->pools[i].buffer_size);
7613 cmd_params->backup_pool_mask |=
7614 @@ -222,7 +197,7 @@ int dpni_enable(struct fsl_mc_io *mc_io,
7615 u32 cmd_flags,
7616 u16 token)
7617 {
7618 - struct mc_command cmd = { 0 };
7619 + struct fsl_mc_command cmd = { 0 };
7620
7621 /* prepare command */
7622 cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
7623 @@ -245,7 +220,7 @@ int dpni_disable(struct fsl_mc_io *mc_io
7624 u32 cmd_flags,
7625 u16 token)
7626 {
7627 - struct mc_command cmd = { 0 };
7628 + struct fsl_mc_command cmd = { 0 };
7629
7630 /* prepare command */
7631 cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
7632 @@ -270,7 +245,7 @@ int dpni_is_enabled(struct fsl_mc_io *mc
7633 u16 token,
7634 int *en)
7635 {
7636 - struct mc_command cmd = { 0 };
7637 + struct fsl_mc_command cmd = { 0 };
7638 struct dpni_rsp_is_enabled *rsp_params;
7639 int err;
7640
7641 @@ -303,7 +278,7 @@ int dpni_reset(struct fsl_mc_io *mc_io,
7642 u32 cmd_flags,
7643 u16 token)
7644 {
7645 - struct mc_command cmd = { 0 };
7646 + struct fsl_mc_command cmd = { 0 };
7647
7648 /* prepare command */
7649 cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
7650 @@ -335,7 +310,7 @@ int dpni_set_irq_enable(struct fsl_mc_io
7651 u8 irq_index,
7652 u8 en)
7653 {
7654 - struct mc_command cmd = { 0 };
7655 + struct fsl_mc_command cmd = { 0 };
7656 struct dpni_cmd_set_irq_enable *cmd_params;
7657
7658 /* prepare command */
7659 @@ -366,7 +341,7 @@ int dpni_get_irq_enable(struct fsl_mc_io
7660 u8 irq_index,
7661 u8 *en)
7662 {
7663 - struct mc_command cmd = { 0 };
7664 + struct fsl_mc_command cmd = { 0 };
7665 struct dpni_cmd_get_irq_enable *cmd_params;
7666 struct dpni_rsp_get_irq_enable *rsp_params;
7667
7668 @@ -413,7 +388,7 @@ int dpni_set_irq_mask(struct fsl_mc_io *
7669 u8 irq_index,
7670 u32 mask)
7671 {
7672 - struct mc_command cmd = { 0 };
7673 + struct fsl_mc_command cmd = { 0 };
7674 struct dpni_cmd_set_irq_mask *cmd_params;
7675
7676 /* prepare command */
7677 @@ -447,7 +422,7 @@ int dpni_get_irq_mask(struct fsl_mc_io *
7678 u8 irq_index,
7679 u32 *mask)
7680 {
7681 - struct mc_command cmd = { 0 };
7682 + struct fsl_mc_command cmd = { 0 };
7683 struct dpni_cmd_get_irq_mask *cmd_params;
7684 struct dpni_rsp_get_irq_mask *rsp_params;
7685 int err;
7686 @@ -489,7 +464,7 @@ int dpni_get_irq_status(struct fsl_mc_io
7687 u8 irq_index,
7688 u32 *status)
7689 {
7690 - struct mc_command cmd = { 0 };
7691 + struct fsl_mc_command cmd = { 0 };
7692 struct dpni_cmd_get_irq_status *cmd_params;
7693 struct dpni_rsp_get_irq_status *rsp_params;
7694 int err;
7695 @@ -532,7 +507,7 @@ int dpni_clear_irq_status(struct fsl_mc_
7696 u8 irq_index,
7697 u32 status)
7698 {
7699 - struct mc_command cmd = { 0 };
7700 + struct fsl_mc_command cmd = { 0 };
7701 struct dpni_cmd_clear_irq_status *cmd_params;
7702
7703 /* prepare command */
7704 @@ -561,7 +536,7 @@ int dpni_get_attributes(struct fsl_mc_io
7705 u16 token,
7706 struct dpni_attr *attr)
7707 {
7708 - struct mc_command cmd = { 0 };
7709 + struct fsl_mc_command cmd = { 0 };
7710 struct dpni_rsp_get_attr *rsp_params;
7711
7712 int err;
7713 @@ -609,7 +584,7 @@ int dpni_set_errors_behavior(struct fsl_
7714 u16 token,
7715 struct dpni_error_cfg *cfg)
7716 {
7717 - struct mc_command cmd = { 0 };
7718 + struct fsl_mc_command cmd = { 0 };
7719 struct dpni_cmd_set_errors_behavior *cmd_params;
7720
7721 /* prepare command */
7722 @@ -641,7 +616,7 @@ int dpni_get_buffer_layout(struct fsl_mc
7723 enum dpni_queue_type qtype,
7724 struct dpni_buffer_layout *layout)
7725 {
7726 - struct mc_command cmd = { 0 };
7727 + struct fsl_mc_command cmd = { 0 };
7728 struct dpni_cmd_get_buffer_layout *cmd_params;
7729 struct dpni_rsp_get_buffer_layout *rsp_params;
7730 int err;
7731 @@ -689,7 +664,7 @@ int dpni_set_buffer_layout(struct fsl_mc
7732 enum dpni_queue_type qtype,
7733 const struct dpni_buffer_layout *layout)
7734 {
7735 - struct mc_command cmd = { 0 };
7736 + struct fsl_mc_command cmd = { 0 };
7737 struct dpni_cmd_set_buffer_layout *cmd_params;
7738
7739 /* prepare command */
7740 @@ -731,7 +706,7 @@ int dpni_set_offload(struct fsl_mc_io *m
7741 enum dpni_offload type,
7742 u32 config)
7743 {
7744 - struct mc_command cmd = { 0 };
7745 + struct fsl_mc_command cmd = { 0 };
7746 struct dpni_cmd_set_offload *cmd_params;
7747
7748 cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
7749 @@ -750,7 +725,7 @@ int dpni_get_offload(struct fsl_mc_io *m
7750 enum dpni_offload type,
7751 u32 *config)
7752 {
7753 - struct mc_command cmd = { 0 };
7754 + struct fsl_mc_command cmd = { 0 };
7755 struct dpni_cmd_get_offload *cmd_params;
7756 struct dpni_rsp_get_offload *rsp_params;
7757 int err;
7758 @@ -792,7 +767,7 @@ int dpni_get_qdid(struct fsl_mc_io *mc_i
7759 enum dpni_queue_type qtype,
7760 u16 *qdid)
7761 {
7762 - struct mc_command cmd = { 0 };
7763 + struct fsl_mc_command cmd = { 0 };
7764 struct dpni_cmd_get_qdid *cmd_params;
7765 struct dpni_rsp_get_qdid *rsp_params;
7766 int err;
7767 @@ -830,7 +805,7 @@ int dpni_get_tx_data_offset(struct fsl_m
7768 u16 token,
7769 u16 *data_offset)
7770 {
7771 - struct mc_command cmd = { 0 };
7772 + struct fsl_mc_command cmd = { 0 };
7773 struct dpni_rsp_get_tx_data_offset *rsp_params;
7774 int err;
7775
7776 @@ -865,7 +840,7 @@ int dpni_set_link_cfg(struct fsl_mc_io *
7777 u16 token,
7778 const struct dpni_link_cfg *cfg)
7779 {
7780 - struct mc_command cmd = { 0 };
7781 + struct fsl_mc_command cmd = { 0 };
7782 struct dpni_cmd_set_link_cfg *cmd_params;
7783
7784 /* prepare command */
7785 @@ -881,6 +856,36 @@ int dpni_set_link_cfg(struct fsl_mc_io *
7786 }
7787
7788 /**
7789 + * dpni_set_link_cfg_v2() - set the link configuration.
7790 + * @mc_io: Pointer to MC portal's I/O object
7791 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7792 + * @token: Token of DPNI object
7793 + * @cfg: Link configuration
7794 + *
7795 + * Return: '0' on Success; Error code otherwise.
7796 + */
7797 +int dpni_set_link_cfg_v2(struct fsl_mc_io *mc_io,
7798 + u32 cmd_flags,
7799 + u16 token,
7800 + const struct dpni_link_cfg *cfg)
7801 +{
7802 + struct fsl_mc_command cmd = { 0 };
7803 + struct dpni_cmd_set_link_cfg_v2 *cmd_params;
7804 +
7805 + /* prepare command */
7806 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG_V2,
7807 + cmd_flags,
7808 + token);
7809 + cmd_params = (struct dpni_cmd_set_link_cfg_v2 *)cmd.params;
7810 + cmd_params->rate = cpu_to_le32(cfg->rate);
7811 + cmd_params->options = cpu_to_le64(cfg->options);
7812 + cmd_params->advertising = cpu_to_le64(cfg->advertising);
7813 +
7814 + /* send command to mc*/
7815 + return mc_send_command(mc_io, &cmd);
7816 +}
7817 +
7818 +/**
7819 * dpni_get_link_state() - Return the link state (either up or down)
7820 * @mc_io: Pointer to MC portal's I/O object
7821 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7822 @@ -894,7 +899,7 @@ int dpni_get_link_state(struct fsl_mc_io
7823 u16 token,
7824 struct dpni_link_state *state)
7825 {
7826 - struct mc_command cmd = { 0 };
7827 + struct fsl_mc_command cmd = { 0 };
7828 struct dpni_rsp_get_link_state *rsp_params;
7829 int err;
7830
7831 @@ -918,6 +923,84 @@ int dpni_get_link_state(struct fsl_mc_io
7832 }
7833
7834 /**
7835 + * dpni_get_link_state_v2() - Return the link state (either up or down)
7836 + * @mc_io: Pointer to MC portal's I/O object
7837 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7838 + * @token: Token of DPNI object
7839 + * @state: Returned link state;
7840 + *
7841 + * Return: '0' on Success; Error code otherwise.
7842 + */
7843 +int dpni_get_link_state_v2(struct fsl_mc_io *mc_io,
7844 + u32 cmd_flags,
7845 + u16 token,
7846 + struct dpni_link_state *state)
7847 +{
7848 + struct fsl_mc_command cmd = { 0 };
7849 + struct dpni_rsp_get_link_state_v2 *rsp_params;
7850 + int err;
7851 +
7852 + /* prepare command */
7853 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE_V2,
7854 + cmd_flags,
7855 + token);
7856 +
7857 + /* send command to mc*/
7858 + err = mc_send_command(mc_io, &cmd);
7859 + if (err)
7860 + return err;
7861 +
7862 + /* retrieve response parameters */
7863 + rsp_params = (struct dpni_rsp_get_link_state_v2 *)cmd.params;
7864 + state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
7865 + state->state_valid = dpni_get_field(rsp_params->flags, STATE_VALID);
7866 + state->rate = le32_to_cpu(rsp_params->rate);
7867 + state->options = le64_to_cpu(rsp_params->options);
7868 + state->supported = le64_to_cpu(rsp_params->supported);
7869 + state->advertising = le64_to_cpu(rsp_params->advertising);
7870 +
7871 + return 0;
7872 +}
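A usage sketch for the extended link-state query above; the priv/net_dev context is an assumption, and the Mbps unit for state.rate follows the MC convention rather than anything stated in this hunk:

	struct dpni_link_state state = { 0 };
	int err;

	err = dpni_get_link_state_v2(priv->mc_io, 0, priv->mc_token, &state);
	if (err)
		return err;

	/* supported/advertising are only meaningful when the MC sets valid */
	if (state.state_valid && state.up)
		netdev_info(net_dev, "link up, %u Mbps\n", state.rate);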
7873 +
7874 +/**
7875 + * dpni_set_tx_shaping() - Set the transmit shaping
7876 + * @mc_io: Pointer to MC portal's I/O object
7877 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7878 + * @token: Token of DPNI object
7879 + * @tx_cr_shaper: TX committed rate shaping configuration
7880 + * @tx_er_shaper: TX excess rate shaping configuration
7881 + * @coupled: Committed and excess rate shapers are coupled
7882 + *
7883 + * Return: '0' on Success; Error code otherwise.
7884 + */
7885 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
7886 + u32 cmd_flags,
7887 + u16 token,
7888 + const struct dpni_tx_shaping_cfg *tx_cr_shaper,
7889 + const struct dpni_tx_shaping_cfg *tx_er_shaper,
7890 + int coupled)
7891 +{
7892 + struct fsl_mc_command cmd = { 0 };
7893 + struct dpni_cmd_set_tx_shaping *cmd_params;
7894 +
7895 + /* prepare command */
7896 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
7897 + cmd_flags,
7898 + token);
7899 + cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
7900 + cmd_params->tx_cr_max_burst_size =
7901 + cpu_to_le16(tx_cr_shaper->max_burst_size);
7902 + cmd_params->tx_er_max_burst_size =
7903 + cpu_to_le16(tx_er_shaper->max_burst_size);
7904 + cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
7905 + cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
7906 + dpni_set_field(cmd_params->coupled, COUPLED, coupled);
7907 +
7908 + /* send command to mc*/
7909 + return mc_send_command(mc_io, &cmd);
7910 +}
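A hedged example of configuring the dual-rate shaper added above; the rate/burst units (Mbps and bytes) are assumptions carried over from the MC documentation, not stated in this hunk:

	/* Committed rate 1000, excess rate 200, shapers left uncoupled */
	struct dpni_tx_shaping_cfg cr = {
		.rate_limit	= 1000,
		.max_burst_size	= 0x1000,
	};
	struct dpni_tx_shaping_cfg er = {
		.rate_limit	= 200,
		.max_burst_size	= 0x1000,
	};
	int err;

	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token,
				  &cr, &er, 0 /* coupled */);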
7911 +
7912 +/**
7913 * dpni_set_max_frame_length() - Set the maximum received frame length.
7914 * @mc_io: Pointer to MC portal's I/O object
7915 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7916 @@ -933,7 +1016,7 @@ int dpni_set_max_frame_length(struct fsl
7917 u16 token,
7918 u16 max_frame_length)
7919 {
7920 - struct mc_command cmd = { 0 };
7921 + struct fsl_mc_command cmd = { 0 };
7922 struct dpni_cmd_set_max_frame_length *cmd_params;
7923
7924 /* prepare command */
7925 @@ -963,7 +1046,7 @@ int dpni_get_max_frame_length(struct fsl
7926 u16 token,
7927 u16 *max_frame_length)
7928 {
7929 - struct mc_command cmd = { 0 };
7930 + struct fsl_mc_command cmd = { 0 };
7931 struct dpni_rsp_get_max_frame_length *rsp_params;
7932 int err;
7933
7934 @@ -998,7 +1081,7 @@ int dpni_set_multicast_promisc(struct fs
7935 u16 token,
7936 int en)
7937 {
7938 - struct mc_command cmd = { 0 };
7939 + struct fsl_mc_command cmd = { 0 };
7940 struct dpni_cmd_set_multicast_promisc *cmd_params;
7941
7942 /* prepare command */
7943 @@ -1026,7 +1109,7 @@ int dpni_get_multicast_promisc(struct fs
7944 u16 token,
7945 int *en)
7946 {
7947 - struct mc_command cmd = { 0 };
7948 + struct fsl_mc_command cmd = { 0 };
7949 struct dpni_rsp_get_multicast_promisc *rsp_params;
7950 int err;
7951
7952 @@ -1061,7 +1144,7 @@ int dpni_set_unicast_promisc(struct fsl_
7953 u16 token,
7954 int en)
7955 {
7956 - struct mc_command cmd = { 0 };
7957 + struct fsl_mc_command cmd = { 0 };
7958 struct dpni_cmd_set_unicast_promisc *cmd_params;
7959
7960 /* prepare command */
7961 @@ -1089,7 +1172,7 @@ int dpni_get_unicast_promisc(struct fsl_
7962 u16 token,
7963 int *en)
7964 {
7965 - struct mc_command cmd = { 0 };
7966 + struct fsl_mc_command cmd = { 0 };
7967 struct dpni_rsp_get_unicast_promisc *rsp_params;
7968 int err;
7969
7970 @@ -1124,7 +1207,7 @@ int dpni_set_primary_mac_addr(struct fsl
7971 u16 token,
7972 const u8 mac_addr[6])
7973 {
7974 - struct mc_command cmd = { 0 };
7975 + struct fsl_mc_command cmd = { 0 };
7976 struct dpni_cmd_set_primary_mac_addr *cmd_params;
7977 int i;
7978
7979 @@ -1154,7 +1237,7 @@ int dpni_get_primary_mac_addr(struct fsl
7980 u16 token,
7981 u8 mac_addr[6])
7982 {
7983 - struct mc_command cmd = { 0 };
7984 + struct fsl_mc_command cmd = { 0 };
7985 struct dpni_rsp_get_primary_mac_addr *rsp_params;
7986 int i, err;
7987
7988 @@ -1193,7 +1276,7 @@ int dpni_get_port_mac_addr(struct fsl_mc
7989 u16 token,
7990 u8 mac_addr[6])
7991 {
7992 - struct mc_command cmd = { 0 };
7993 + struct fsl_mc_command cmd = { 0 };
7994 struct dpni_rsp_get_port_mac_addr *rsp_params;
7995 int i, err;
7996
7997 @@ -1229,7 +1312,7 @@ int dpni_add_mac_addr(struct fsl_mc_io *
7998 u16 token,
7999 const u8 mac_addr[6])
8000 {
8001 - struct mc_command cmd = { 0 };
8002 + struct fsl_mc_command cmd = { 0 };
8003 struct dpni_cmd_add_mac_addr *cmd_params;
8004 int i;
8005
8006 @@ -1259,7 +1342,7 @@ int dpni_remove_mac_addr(struct fsl_mc_i
8007 u16 token,
8008 const u8 mac_addr[6])
8009 {
8010 - struct mc_command cmd = { 0 };
8011 + struct fsl_mc_command cmd = { 0 };
8012 struct dpni_cmd_remove_mac_addr *cmd_params;
8013 int i;
8014
8015 @@ -1293,7 +1376,7 @@ int dpni_clear_mac_filters(struct fsl_mc
8016 int unicast,
8017 int multicast)
8018 {
8019 - struct mc_command cmd = { 0 };
8020 + struct fsl_mc_command cmd = { 0 };
8021 struct dpni_cmd_clear_mac_filters *cmd_params;
8022
8023 /* prepare command */
8024 @@ -1309,6 +1392,55 @@ int dpni_clear_mac_filters(struct fsl_mc
8025 }
8026
8027 /**
8028 + * dpni_set_tx_priorities() - Set transmission TC priority configuration
8029 + * @mc_io: Pointer to MC portal's I/O object
8030 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8031 + * @token: Token of DPNI object
8032 + * @cfg: Transmission selection configuration
8033 + *
8034 + * warning: Allowed only when DPNI is disabled
8035 + *
8036 + * Return: '0' on Success; Error code otherwise.
8037 + */
8038 +int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
8039 + u32 cmd_flags,
8040 + u16 token,
8041 + const struct dpni_tx_priorities_cfg *cfg)
8042 +{
8043 + struct dpni_cmd_set_tx_priorities *cmd_params;
8044 + struct fsl_mc_command cmd = { 0 };
8045 + int i;
8046 +
8047 + /* prepare command */
8048 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
8049 + cmd_flags,
8050 + token);
8051 + cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
8052 + dpni_set_field(cmd_params->flags,
8053 + SEPARATE_GRP,
8054 + cfg->separate_groups);
8055 + cmd_params->prio_group_A = cfg->prio_group_A;
8056 + cmd_params->prio_group_B = cfg->prio_group_B;
8057 +
8058 + for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) {
8059 + dpni_set_field(cmd_params->modes[i / 2],
8060 + MODE_1,
8061 + cfg->tc_sched[i].mode);
8062 + dpni_set_field(cmd_params->modes[i / 2],
8063 + MODE_2,
8064 + cfg->tc_sched[i + 1].mode);
8065 + }
8066 +
8067 + for (i = 0; i < DPNI_MAX_TC; i++) {
8068 + cmd_params->delta_bandwidth[i] =
8069 + cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
8070 + }
8071 +
8072 + /* send command to mc*/
8073 + return mc_send_command(mc_io, &cmd);
8074 +}
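A sketch of a mixed strict/weighted configuration for this call; the DPNI_TX_SCHED_* mode values live in dpni.h and are assumed here, and the DPNI must be disabled when the command is issued (see the warning above):

	struct dpni_tx_priorities_cfg cfg = { 0 };
	int i, err;

	/* TC0/TC1 strict, remaining TCs weighted within group A */
	cfg.tc_sched[0].mode = DPNI_TX_SCHED_STRICT_PRIORITY;
	cfg.tc_sched[1].mode = DPNI_TX_SCHED_STRICT_PRIORITY;
	for (i = 2; i < DPNI_MAX_TC; i++) {
		cfg.tc_sched[i].mode = DPNI_TX_SCHED_WEIGHTED_A;
		cfg.tc_sched[i].delta_bandwidth = 100; /* equal weights */
	}
	cfg.prio_group_A = 1;
	cfg.separate_groups = 0;

	err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token, &cfg);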
8075 +
8076 +/**
8077 * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
8078 * @mc_io: Pointer to MC portal's I/O object
8079 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8080 @@ -1327,7 +1459,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
8081 u8 tc_id,
8082 const struct dpni_rx_tc_dist_cfg *cfg)
8083 {
8084 - struct mc_command cmd = { 0 };
8085 + struct fsl_mc_command cmd = { 0 };
8086 struct dpni_cmd_set_rx_tc_dist *cmd_params;
8087
8088 /* prepare command */
8089 @@ -1346,6 +1478,215 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
8090 return mc_send_command(mc_io, &cmd);
8091 }
8092
8093 +/**
8094 + * dpni_set_qos_table() - Set QoS mapping table
8095 + * @mc_io: Pointer to MC portal's I/O object
8096 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8097 + * @token: Token of DPNI object
8098 + * @cfg: QoS table configuration
8099 + *
8100 + * This function and all QoS-related functions require that
8101 + * 'max_tcs > 1' was set at DPNI creation.
8102 + *
8103 + * warning: Before calling this function, call dpkg_prepare_key_cfg() to
8104 + * prepare the key_cfg_iova parameter
8105 + *
8106 + * Return: '0' on Success; Error code otherwise.
8107 + */
8108 +int dpni_set_qos_table(struct fsl_mc_io *mc_io,
8109 + u32 cmd_flags,
8110 + u16 token,
8111 + const struct dpni_qos_tbl_cfg *cfg)
8112 +{
8113 + struct dpni_cmd_set_qos_table *cmd_params;
8114 + struct fsl_mc_command cmd = { 0 };
8115 +
8116 + /* prepare command */
8117 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
8118 + cmd_flags,
8119 + token);
8120 + cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
8121 + cmd_params->default_tc = cfg->default_tc;
8122 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
8123 + dpni_set_field(cmd_params->discard_on_miss,
8124 + ENABLE,
8125 + cfg->discard_on_miss);
8126 +
8127 + /* send command to mc*/
8128 + return mc_send_command(mc_io, &cmd);
8129 +}
8130 +
8131 +/**
8132 + * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
8133 + * @mc_io: Pointer to MC portal's I/O object
8134 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8135 + * @token: Token of DPNI object
8136 + * @cfg: QoS rule to add
8137 + * @tc_id: Traffic class selection (0-7)
8138 + * @index:	Location in the QoS table at which to insert the entry.
8139 + *		Only relevant if MASKING is enabled for QoS classification
8140 + *		on this DPNI; it is ignored for exact match.
8141 + *
8142 + * Return: '0' on Success; Error code otherwise.
8143 + */
8144 +int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
8145 + u32 cmd_flags,
8146 + u16 token,
8147 + const struct dpni_rule_cfg *cfg,
8148 + u8 tc_id,
8149 + u16 index)
8150 +{
8151 + struct dpni_cmd_add_qos_entry *cmd_params;
8152 + struct fsl_mc_command cmd = { 0 };
8153 +
8154 + /* prepare command */
8155 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
8156 + cmd_flags,
8157 + token);
8158 + cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
8159 + cmd_params->tc_id = tc_id;
8160 + cmd_params->key_size = cfg->key_size;
8161 + cmd_params->index = cpu_to_le16(index);
8162 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8163 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8164 +
8165 + /* send command to mc*/
8166 + return mc_send_command(mc_io, &cmd);
8167 +}
8168 +
8169 +/**
8170 + * dpni_remove_qos_entry() - Remove QoS mapping entry
8171 + * @mc_io: Pointer to MC portal's I/O object
8172 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8173 + * @token: Token of DPNI object
8174 + * @cfg: QoS rule to remove
8175 + *
8176 + * Return: '0' on Success; Error code otherwise.
8177 + */
8178 +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
8179 + u32 cmd_flags,
8180 + u16 token,
8181 + const struct dpni_rule_cfg *cfg)
8182 +{
8183 + struct dpni_cmd_remove_qos_entry *cmd_params;
8184 + struct fsl_mc_command cmd = { 0 };
8185 +
8186 + /* prepare command */
8187 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
8188 + cmd_flags,
8189 + token);
8190 + cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
8191 + cmd_params->key_size = cfg->key_size;
8192 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8193 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8194 +
8195 + /* send command to mc*/
8196 + return mc_send_command(mc_io, &cmd);
8197 +}
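Taken together, the three QoS calls above are typically used as below. A sketch only: key_cfg_iova, key_iova and mask_iova are DMA addresses of caller-prepared buffers (the key config one usually filled via dpkg_prepare_key_cfg()), and the 2-byte VLAN TCI key is an assumed example:

	struct dpni_qos_tbl_cfg tbl = {
		.key_cfg_iova	 = key_cfg_iova,
		.discard_on_miss = 0,	/* miss -> default_tc, don't drop */
		.default_tc	 = 0,
	};
	struct dpni_rule_cfg rule = {
		.key_iova  = key_iova,	/* e.g. VLAN TCI value */
		.mask_iova = mask_iova,	/* e.g. mask covering the PCP bits */
		.key_size  = 2,
	};
	int err;

	err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &tbl);
	if (!err)
		err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
					 &rule, 5 /* tc_id */, 0 /* index */);

	/* on teardown the same rule descriptor is passed back */
	err = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token, &rule);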
8198 +
8199 +/**
8200 + * dpni_set_congestion_notification() - Set traffic class congestion
8201 + * notification configuration
8202 + * @mc_io: Pointer to MC portal's I/O object
8203 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8204 + * @token: Token of DPNI object
8205 + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
8206 + * @tc_id: Traffic class selection (0-7)
8207 + * @cfg: Congestion notification configuration
8208 + *
8209 + * Return: '0' on Success; error code otherwise.
8210 + */
8211 +int dpni_set_congestion_notification(
8212 + struct fsl_mc_io *mc_io,
8213 + u32 cmd_flags,
8214 + u16 token,
8215 + enum dpni_queue_type qtype,
8216 + u8 tc_id,
8217 + const struct dpni_congestion_notification_cfg *cfg)
8218 +{
8219 + struct dpni_cmd_set_congestion_notification *cmd_params;
8220 + struct fsl_mc_command cmd = { 0 };
8221 +
8222 + /* prepare command */
8223 + cmd.header = mc_encode_cmd_header(
8224 + DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
8225 + cmd_flags,
8226 + token);
8227 + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
8228 + cmd_params->qtype = qtype;
8229 + cmd_params->tc = tc_id;
8230 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
8231 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
8232 + cmd_params->dest_priority = cfg->dest_cfg.priority;
8233 + dpni_set_field(cmd_params->type_units, DEST_TYPE,
8234 + cfg->dest_cfg.dest_type);
8235 + dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
8236 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
8237 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
8238 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
8239 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
8240 +
8241 + /* send command to mc*/
8242 + return mc_send_command(mc_io, &cmd);
8243 +}
8244 +
8245 +/**
8246 + * dpni_get_congestion_notification() - Get traffic class congestion
8247 + * notification configuration
8248 + * @mc_io: Pointer to MC portal's I/O object
8249 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8250 + * @token: Token of DPNI object
8251 + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
8252 + * @tc_id: bits 7-4 contain ceetm channel index (valid only for TX);
8253 + * bits 3-0 contain traffic class.
8254 + * Use macro DPNI_BUILD_CH_TC() to build correct value for
8255 + * tc_id parameter.
8256 + * @cfg: congestion notification configuration
8257 + *
8258 + * Return: '0' on Success; error code otherwise.
8259 + */
8260 +int dpni_get_congestion_notification(
8261 + struct fsl_mc_io *mc_io,
8262 + u32 cmd_flags,
8263 + u16 token,
8264 + enum dpni_queue_type qtype,
8265 + u8 tc_id,
8266 + struct dpni_congestion_notification_cfg *cfg)
8267 +{
8268 + struct dpni_rsp_get_congestion_notification *rsp_params;
8269 + struct dpni_cmd_get_congestion_notification *cmd_params;
8270 + struct fsl_mc_command cmd = { 0 };
8271 + int err;
8272 +
8273 + /* prepare command */
8274 + cmd.header = mc_encode_cmd_header(
8275 + DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
8276 + cmd_flags,
8277 + token);
8278 + cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
8279 + cmd_params->qtype = qtype;
8280 + cmd_params->tc = tc_id;
8281 +
8282 + /* send command to mc*/
8283 + err = mc_send_command(mc_io, &cmd);
8284 + if (err)
8285 + return err;
8286 +
8287 + rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
8288 + cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
8289 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
8290 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
8291 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
8292 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
8293 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
8294 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
8295 + cfg->dest_cfg.priority = rsp_params->dest_priority;
8296 + cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
8297 + DEST_TYPE);
8298 +
8299 + return 0;
8300 +}
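A sketch of arming the congestion notification added above, for a Tx traffic class crossing a byte threshold; the DPNI_CONGESTION_UNIT_* / DPNI_CONG_OPT_* names come from dpni.h and are assumptions here, and msg_iova must point at DMA-able memory for the MC to write into:

	struct dpni_congestion_notification_cfg cn = { 0 };
	int err;

	cn.units = DPNI_CONGESTION_UNIT_BYTES;
	cn.threshold_entry = 64 * 1024;
	cn.threshold_exit = 32 * 1024;
	cn.message_iova = msg_iova;
	cn.message_ctx = (u64)(uintptr_t)priv;
	cn.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
			       DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;

	err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
					       DPNI_QUEUE_TX, 0 /* tc */, &cn);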
8301 +
8302 /**
8303 * dpni_set_queue() - Set queue parameters
8304 * @mc_io: Pointer to MC portal's I/O object
8305 @@ -1371,7 +1712,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_
8306 u8 options,
8307 const struct dpni_queue *queue)
8308 {
8309 - struct mc_command cmd = { 0 };
8310 + struct fsl_mc_command cmd = { 0 };
8311 struct dpni_cmd_set_queue *cmd_params;
8312
8313 /* prepare command */
8314 @@ -1419,7 +1760,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_
8315 struct dpni_queue *queue,
8316 struct dpni_queue_id *qid)
8317 {
8318 - struct mc_command cmd = { 0 };
8319 + struct fsl_mc_command cmd = { 0 };
8320 struct dpni_cmd_get_queue *cmd_params;
8321 struct dpni_rsp_get_queue *rsp_params;
8322 int err;
8323 @@ -1463,6 +1804,8 @@ int dpni_get_queue(struct fsl_mc_io *mc_
8324 * @token: Token of DPNI object
8325 * @page: Selects the statistics page to retrieve, see
8326 * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
8327 + * @param:	Custom parameter for some pages, used to select a certain
8328 + *		statistics source, for example the TC.
8329 * @stat: Structure containing the statistics
8330 *
8331 * Return: '0' on Success; Error code otherwise.
8332 @@ -1471,9 +1814,10 @@ int dpni_get_statistics(struct fsl_mc_io
8333 u32 cmd_flags,
8334 u16 token,
8335 u8 page,
8336 + u8 param,
8337 union dpni_statistics *stat)
8338 {
8339 - struct mc_command cmd = { 0 };
8340 + struct fsl_mc_command cmd = { 0 };
8341 struct dpni_cmd_get_statistics *cmd_params;
8342 struct dpni_rsp_get_statistics *rsp_params;
8343 int i, err;
8344 @@ -1484,6 +1828,7 @@ int dpni_get_statistics(struct fsl_mc_io
8345 token);
8346 cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
8347 cmd_params->page_number = page;
8348 + cmd_params->param = param;
8349
8350 /* send command to mc */
8351 err = mc_send_command(mc_io, &cmd);
8352 @@ -1499,6 +1844,29 @@ int dpni_get_statistics(struct fsl_mc_io
8353 }
8354
8355 /**
8356 + * dpni_reset_statistics() - Clears DPNI statistics
8357 + * @mc_io: Pointer to MC portal's I/O object
8358 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8359 + * @token: Token of DPNI object
8360 + *
8361 + * Return: '0' on Success; Error code otherwise.
8362 + */
8363 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8364 + u32 cmd_flags,
8365 + u16 token)
8366 +{
8367 + struct fsl_mc_command cmd = { 0 };
8368 +
8369 + /* prepare command */
8370 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
8371 + cmd_flags,
8372 + token);
8373 +
8374 + /* send command to mc*/
8375 + return mc_send_command(mc_io, &cmd);
8376 +}
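A short sketch pairing the extended statistics read with the new reset; which pages accept a TC selector in param is firmware-defined, so the page 1/param 0 choice here is a placeholder:

	union dpni_statistics stats;
	int err;

	err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
				  1 /* page */, 0 /* param */, &stats);
	if (!err)
		err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);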
8377 +
8378 +/**
8379 * dpni_set_taildrop() - Set taildrop per queue or TC
8380 * @mc_io: Pointer to MC portal's I/O object
8381 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8382 @@ -1506,7 +1874,10 @@ int dpni_get_statistics(struct fsl_mc_io
8383 * @cg_point: Congestion point
8384 * @q_type: Queue type on which the taildrop is configured.
8385 * Only Rx queues are supported for now
8386 - * @tc: Traffic class to apply this taildrop to
8387 + * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
8388 + * bits 3-0 contain traffic class.
8389 + * Use macro DPNI_BUILD_CH_TC() to build correct value for
8390 + * tc parameter.
8391 * @q_index: Index of the queue if the DPNI supports multiple queues for
8392 * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8393 * @taildrop: Taildrop structure
8394 @@ -1522,7 +1893,7 @@ int dpni_set_taildrop(struct fsl_mc_io *
8395 u8 index,
8396 struct dpni_taildrop *taildrop)
8397 {
8398 - struct mc_command cmd = { 0 };
8399 + struct fsl_mc_command cmd = { 0 };
8400 struct dpni_cmd_set_taildrop *cmd_params;
8401
8402 /* prepare command */
8403 @@ -1550,7 +1921,10 @@ int dpni_set_taildrop(struct fsl_mc_io *
8404 * @cg_point: Congestion point
8405 * @q_type: Queue type on which the taildrop is configured.
8406 * Only Rx queues are supported for now
8407 - * @tc: Traffic class to apply this taildrop to
8408 + * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
8409 + * bits 3-0 contain traffic class.
8410 + * Use macro DPNI_BUILD_CH_TC() to build correct value for
8411 + * tc parameter.
8412 * @q_index: Index of the queue if the DPNI supports multiple queues for
8413 * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8414 * @taildrop: Taildrop structure
8415 @@ -1566,7 +1940,7 @@ int dpni_get_taildrop(struct fsl_mc_io *
8416 u8 index,
8417 struct dpni_taildrop *taildrop)
8418 {
8419 - struct mc_command cmd = { 0 };
8420 + struct fsl_mc_command cmd = { 0 };
8421 struct dpni_cmd_get_taildrop *cmd_params;
8422 struct dpni_rsp_get_taildrop *rsp_params;
8423 int err;
8424 @@ -1594,3 +1968,187 @@ int dpni_get_taildrop(struct fsl_mc_io *
8425
8426 return 0;
8427 }
8428 +
8429 +/**
8430 + * dpni_get_api_version() - Get Data Path Network Interface API version
8431 + * @mc_io: Pointer to MC portal's I/O object
8432 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8433 + * @major_ver: Major version of data path network interface API
8434 + * @minor_ver: Minor version of data path network interface API
8435 + *
8436 + * Return: '0' on Success; Error code otherwise.
8437 + */
8438 +int dpni_get_api_version(struct fsl_mc_io *mc_io,
8439 + u32 cmd_flags,
8440 + u16 *major_ver,
8441 + u16 *minor_ver)
8442 +{
8443 + struct dpni_rsp_get_api_version *rsp_params;
8444 + struct fsl_mc_command cmd = { 0 };
8445 + int err;
8446 +
8447 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
8448 + cmd_flags, 0);
8449 +
8450 + err = mc_send_command(mc_io, &cmd);
8451 + if (err)
8452 + return err;
8453 +
8454 + rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
8455 + *major_ver = le16_to_cpu(rsp_params->major);
8456 + *minor_ver = le16_to_cpu(rsp_params->minor);
8457 +
8458 + return 0;
8459 +}
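Since several commands now exist in two versions, a caller can gate on the reported API version; a sketch, with the 7.2 cutoff chosen purely for illustration:

	struct dpni_link_state state = { 0 };
	u16 major, minor;
	int err;

	err = dpni_get_api_version(priv->mc_io, 0, &major, &minor);
	if (err)
		return err;

	if (major > 7 || (major == 7 && minor >= 2))
		err = dpni_get_link_state_v2(priv->mc_io, 0,
					     priv->mc_token, &state);
	else
		err = dpni_get_link_state(priv->mc_io, 0,
					  priv->mc_token, &state);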
8460 +
8461 +/**
8462 + * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
8463 + * @mc_io: Pointer to MC portal's I/O object
8464 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8465 + * @token: Token of DPNI object
8466 + * @cfg: Distribution configuration
8467 + *			If FS was already enabled by a previous call, the
8468 + *			classification key is changed but all existing table
8469 + *			rules are kept; if those rules do not match the new
8470 + *			key, the results are unpredictable. It is the user's
8471 + *			responsibility to keep the key consistent.
8472 + * If cfg.enable is set to 1, the command creates a flow steering table and
8473 + * classifies packets according to it. Packets that miss all the table rules
8474 + * are classified according to the settings made in dpni_set_rx_hash_dist().
8475 + * If cfg.enable is set to 0, the command clears the flow steering table;
8476 + * packets are then classified according to the settings made in
8477 + * dpni_set_rx_hash_dist().
8478 + */
8479 +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
8480 + u32 cmd_flags,
8481 + u16 token,
8482 + const struct dpni_rx_dist_cfg *cfg)
8483 +{
8484 + struct dpni_cmd_set_rx_fs_dist *cmd_params;
8485 + struct fsl_mc_command cmd = { 0 };
8486 +
8487 + /* prepare command */
8488 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
8489 + cmd_flags,
8490 + token);
8491 + cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
8492 + cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
8493 + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
8494 + cmd_params->tc = cfg->tc;
8495 + cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
8496 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
8497 +
8498 +	/* send command to mc */
8499 + return mc_send_command(mc_io, &cmd);
8500 +}
8501 +
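+/*
+ * Usage sketch (illustrative; 'key_iova' is assumed to be the I/O address of
+ * a 256-byte DMA-able buffer already filled in by the key preparation
+ * helper): enable flow steering on TC 0 over 8 queues, dropping frames that
+ * miss all the rules:
+ *
+ *	struct dpni_rx_dist_cfg dist = {
+ *		.dist_size = 8,
+ *		.key_cfg_iova = key_iova,
+ *		.enable = 1,
+ *		.tc = 0,
+ *		.fs_miss_flow_id = DPNI_FS_MISS_DROP,
+ *	};
+ *
+ *	err = dpni_set_rx_fs_dist(mc_io, 0, token, &dist);
+ */
+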
8502 +/**
8503 + * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
8504 + * @mc_io: Pointer to MC portal's I/O object
8505 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8506 + * @token: Token of DPNI object
8507 + * @cfg: Distribution configuration
8508 + *	If cfg.enable is set to 1, packets will be classified using a hash
8509 + *	function based on the key received in the cfg.key_cfg_iova parameter.
8510 + *	If cfg.enable is set to 0, packets will be sent to the queue configured
8511 + *	by the dpni_set_rx_dist_default_queue() call.
8512 + */
8513 +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
8514 + u32 cmd_flags,
8515 + u16 token,
8516 + const struct dpni_rx_dist_cfg *cfg)
8517 +{
8518 + struct dpni_cmd_set_rx_hash_dist *cmd_params;
8519 + struct fsl_mc_command cmd = { 0 };
8520 +
8521 + /* prepare command */
8522 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
8523 + cmd_flags,
8524 + token);
8525 + cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
8526 + cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
8527 + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
8528 + cmd_params->tc = cfg->tc;
8529 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
8530 +
8531 +	/* send command to mc */
8532 + return mc_send_command(mc_io, &cmd);
8533 +}
8534 +
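+/*
+ * Usage sketch (illustrative; same assumptions as above): hash-distribute
+ * TC 0 traffic across 8 queues based on the prepared key:
+ *
+ *	struct dpni_rx_dist_cfg dist = {
+ *		.dist_size = 8,
+ *		.key_cfg_iova = key_iova,
+ *		.enable = 1,
+ *		.tc = 0,
+ *	};
+ *
+ *	err = dpni_set_rx_hash_dist(mc_io, 0, token, &dist);
+ */
+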
8535 +/**
8536 + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
8537 + * (to select a flow ID)
8538 + * @mc_io: Pointer to MC portal's I/O object
8539 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8540 + * @token: Token of DPNI object
8541 + * @tc_id: Traffic class selection (0-7)
8542 + * @index: Location in the QoS table where to insert the entry.
8543 + * Only relevant if MASKING is enabled for QoS
8544 + *		classification on this DPNI; it is ignored for exact match.
8545 + * @cfg: Flow steering rule to add
8546 + * @action: Action to be taken as result of a classification hit
8547 + *
8548 + * Return: '0' on Success; Error code otherwise.
8549 + */
8550 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
8551 + u32 cmd_flags,
8552 + u16 token,
8553 + u8 tc_id,
8554 + u16 index,
8555 + const struct dpni_rule_cfg *cfg,
8556 + const struct dpni_fs_action_cfg *action)
8557 +{
8558 + struct dpni_cmd_add_fs_entry *cmd_params;
8559 + struct fsl_mc_command cmd = { 0 };
8560 +
8561 + /* prepare command */
8562 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
8563 + cmd_flags,
8564 + token);
8565 + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
8566 + cmd_params->tc_id = tc_id;
8567 + cmd_params->key_size = cfg->key_size;
8568 + cmd_params->index = cpu_to_le16(index);
8569 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8570 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8571 + cmd_params->options = cpu_to_le16(action->options);
8572 + cmd_params->flow_id = cpu_to_le16(action->flow_id);
8573 + cmd_params->flc = cpu_to_le64(action->flc);
8574 +
8575 +	/* send command to mc */
8576 + return mc_send_command(mc_io, &cmd);
8577 +}
8578 +
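+/*
+ * Usage sketch (illustrative; 'key_iova' and 'mask_iova' are assumed to be
+ * DMA-able buffers already holding the packed rule key and its mask):
+ *
+ *	struct dpni_rule_cfg rule = {
+ *		.key_iova = key_iova,
+ *		.mask_iova = mask_iova,
+ *		.key_size = key_size,
+ *	};
+ *	struct dpni_fs_action_cfg act = {
+ *		.flow_id = 3,	// steer hits on TC 0 to Rx queue 3
+ *	};
+ *
+ *	err = dpni_add_fs_entry(mc_io, 0, token, 0, 0, &rule, &act);
+ */
+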
8579 +/**
8580 + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
8581 + * traffic class
8582 + * @mc_io: Pointer to MC portal's I/O object
8583 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8584 + * @token: Token of DPNI object
8585 + * @tc_id: Traffic class selection (0-7)
8586 + * @cfg: Flow steering rule to remove
8587 + *
8588 + * Return: '0' on Success; Error code otherwise.
8589 + */
8590 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
8591 + u32 cmd_flags,
8592 + u16 token,
8593 + u8 tc_id,
8594 + const struct dpni_rule_cfg *cfg)
8595 +{
8596 + struct dpni_cmd_remove_fs_entry *cmd_params;
8597 + struct fsl_mc_command cmd = { 0 };
8598 +
8599 + /* prepare command */
8600 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
8601 + cmd_flags,
8602 + token);
8603 + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
8604 + cmd_params->tc_id = tc_id;
8605 + cmd_params->key_size = cfg->key_size;
8606 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8607 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8608 +
8609 +	/* send command to mc */
8610 + return mc_send_command(mc_io, &cmd);
8611 +}
8612 --- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8613 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8614 @@ -1,34 +1,6 @@
8615 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
8616 /* Copyright 2013-2016 Freescale Semiconductor Inc.
8617 * Copyright 2016 NXP
8618 - *
8619 - * Redistribution and use in source and binary forms, with or without
8620 - * modification, are permitted provided that the following conditions are met:
8621 - * * Redistributions of source code must retain the above copyright
8622 - * notice, this list of conditions and the following disclaimer.
8623 - * * Redistributions in binary form must reproduce the above copyright
8624 - * notice, this list of conditions and the following disclaimer in the
8625 - * documentation and/or other materials provided with the distribution.
8626 - * * Neither the name of the above-listed copyright holders nor the
8627 - * names of any contributors may be used to endorse or promote products
8628 - * derived from this software without specific prior written permission.
8629 - *
8630 - *
8631 - * ALTERNATIVELY, this software may be distributed under the terms of the
8632 - * GNU General Public License ("GPL") as published by the Free Software
8633 - * Foundation, either version 2 of that License or (at your option) any
8634 - * later version.
8635 - *
8636 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
8637 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8638 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8639 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
8640 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
8641 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
8642 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
8643 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
8644 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
8645 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
8646 - * POSSIBILITY OF SUCH DAMAGE.
8647 */
8648 #ifndef __FSL_DPNI_H
8649 #define __FSL_DPNI_H
8650 @@ -52,6 +24,14 @@ struct fsl_mc_io;
8651 * Maximum number of buffer pools per DPNI
8652 */
8653 #define DPNI_MAX_DPBP 8
8654 +/**
8655 + * Maximum number of senders
8656 + */
8657 +#define DPNI_MAX_SENDERS 16
8658 +/**
8659 + * Maximum distribution size
8660 + */
8661 +#define DPNI_MAX_DIST_SIZE 16
8662
8663 /**
8664 * All traffic classes considered; see dpni_set_queue()
8665 @@ -123,13 +103,15 @@ struct dpni_pools_cfg {
8666 /**
8667 * struct pools - Buffer pools parameters
8668 * @dpbp_id: DPBP object ID
8669 + * @priority_mask: priorities served by DPBP
8670 * @buffer_size: Buffer size
8671 * @backup_pool: Backup pool
8672 */
8673 struct {
8674 - int dpbp_id;
8675 + u16 dpbp_id;
8676 + u8 priority_mask;
8677 u16 buffer_size;
8678 - int backup_pool;
8679 + u8 backup_pool;
8680 } pools[DPNI_MAX_DPBP];
8681 };
8682
8683 @@ -476,6 +458,24 @@ union dpni_statistics {
8684 u64 egress_confirmed_frames;
8685 } page_2;
8686 /**
8687 + * struct page_3 - Page_3 statistics structure with values for the
8688 + * selected TC
8689 + * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
8690 + * dequeued
8691 + * @ceetm_dequeue_frames: Cumulative count of the number of frames
8692 + * dequeued
8693 + * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
8694 + * frames whose enqueue was rejected
8695 + * @ceetm_reject_frames: Cumulative count of all frame enqueues
8696 + * rejected
8697 + */
8698 + struct {
8699 + u64 ceetm_dequeue_bytes;
8700 + u64 ceetm_dequeue_frames;
8701 + u64 ceetm_reject_bytes;
8702 + u64 ceetm_reject_frames;
8703 + } page_3;
8704 + /**
8705 * struct raw - raw statistics structure
8706 */
8707 struct {
8708 @@ -487,8 +487,13 @@ int dpni_get_statistics(struct fsl_mc_io
8709 u32 cmd_flags,
8710 u16 token,
8711 u8 page,
8712 + u8 param,
8713 union dpni_statistics *stat);
8714
8715 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8716 + u32 cmd_flags,
8717 + u16 token);
8718 +
8719 /**
8720 * Enable auto-negotiation
8721 */
8722 @@ -505,6 +510,23 @@ int dpni_get_statistics(struct fsl_mc_io
8723 * Enable a-symmetric pause frames
8724 */
8725 #define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
8726 +/**
8727 + * Enable priority flow control pause frames
8728 + */
8729 +#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
8730 +/**
8731 + * Advertised link speeds
8732 + */
8733 +#define DPNI_ADVERTISED_10BASET_FULL 0x0000000000000001ULL
8734 +#define DPNI_ADVERTISED_100BASET_FULL 0x0000000000000002ULL
8735 +#define DPNI_ADVERTISED_1000BASET_FULL 0x0000000000000004ULL
8736 +#define DPNI_ADVERTISED_10000BASET_FULL 0x0000000000000010ULL
8737 +#define DPNI_ADVERTISED_2500BASEX_FULL 0x0000000000000020ULL
8738 +
8739 +/**
8740 + * Advertise auto-negotiation enabled
8741 + */
8742 +#define DPNI_ADVERTISED_AUTONEG 0x0000000000000008ULL
8743
8744 /**
8745 * struct - Structure representing DPNI link configuration
8746 @@ -514,6 +536,7 @@ int dpni_get_statistics(struct fsl_mc_io
8747 struct dpni_link_cfg {
8748 u32 rate;
8749 u64 options;
8750 + u64 advertising;
8751 };
8752
8753 int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
8754 @@ -521,6 +544,11 @@ int dpni_set_link_cfg(struct fsl_mc_io
8755 u16 token,
8756 const struct dpni_link_cfg *cfg);
8757
8758 +int dpni_set_link_cfg_v2(struct fsl_mc_io *mc_io,
8759 + u32 cmd_flags,
8760 + u16 token,
8761 + const struct dpni_link_cfg *cfg);
8762 +
8763 /**
8764 * struct dpni_link_state - Structure representing DPNI link state
8765 * @rate: Rate
8766 @@ -530,7 +558,10 @@ int dpni_set_link_cfg(struct fsl_mc_io
8767 struct dpni_link_state {
8768 u32 rate;
8769 u64 options;
8770 + u64 supported;
8771 + u64 advertising;
8772 int up;
8773 + int state_valid;
8774 };
8775
8776 int dpni_get_link_state(struct fsl_mc_io *mc_io,
8777 @@ -538,6 +569,28 @@ int dpni_get_link_state(struct fsl_mc_io
8778 u16 token,
8779 struct dpni_link_state *state);
8780
8781 +int dpni_get_link_state_v2(struct fsl_mc_io *mc_io,
8782 + u32 cmd_flags,
8783 + u16 token,
8784 + struct dpni_link_state *state);
8785 +
8786 +/**
8787 + * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping configuration
8788 + * @rate_limit: rate in Mbps
8789 + * @max_burst_size: burst size in bytes (up to 64KB)
8790 + */
8791 +struct dpni_tx_shaping_cfg {
8792 + u32 rate_limit;
8793 + u16 max_burst_size;
8794 +};
8795 +
8796 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
8797 + u32 cmd_flags,
8798 + u16 token,
8799 + const struct dpni_tx_shaping_cfg *tx_cr_shaper,
8800 + const struct dpni_tx_shaping_cfg *tx_er_shaper,
8801 + int coupled);
8802 +
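+/*
+ * Usage sketch (illustrative): limit egress to 1 Gbps with a 32 KB burst
+ * allowance on the committed-rate shaper, leaving the excess-rate shaper
+ * unconfigured and the two shapers uncoupled:
+ *
+ *	struct dpni_tx_shaping_cfg cr = { .rate_limit = 1000,
+ *					  .max_burst_size = 32768 };
+ *	struct dpni_tx_shaping_cfg er = { 0 };
+ *
+ *	err = dpni_set_tx_shaping(mc_io, 0, token, &cr, &er, 0);
+ */
+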
8803 int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
8804 u32 cmd_flags,
8805 u16 token,
8806 @@ -639,6 +692,70 @@ int dpni_prepare_key_cfg(const struct dp
8807 u8 *key_cfg_buf);
8808
8809 /**
8810 + * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
8811 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
8812 + * key extractions to be used as the QoS criteria by calling
8813 + *		dpni_prepare_key_cfg()
8814 + * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
8815 + * '0' to use the 'default_tc' in such cases
8816 + * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
8817 + */
8818 +struct dpni_qos_tbl_cfg {
8819 + u64 key_cfg_iova;
8820 + int discard_on_miss;
8821 + u8 default_tc;
8822 +};
8823 +
8824 +int dpni_set_qos_table(struct fsl_mc_io *mc_io,
8825 + u32 cmd_flags,
8826 + u16 token,
8827 + const struct dpni_qos_tbl_cfg *cfg);
8828 +
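+/*
+ * Usage sketch (illustrative; 'qos_key_iova' is an assumed I/O address of a
+ * key buffer prepared as described above): classify on the prepared key and
+ * send non-matching frames to TC 0 instead of discarding them:
+ *
+ *	struct dpni_qos_tbl_cfg qos = {
+ *		.key_cfg_iova = qos_key_iova,
+ *		.discard_on_miss = 0,
+ *		.default_tc = 0,
+ *	};
+ *
+ *	err = dpni_set_qos_table(mc_io, 0, token, &qos);
+ */
+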
8829 +/**
8830 + * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
8831 + * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
8832 + * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
8833 + * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
8834 + */
8835 +enum dpni_tx_schedule_mode {
8836 + DPNI_TX_SCHED_STRICT_PRIORITY = 0,
8837 + DPNI_TX_SCHED_WEIGHTED_A,
8838 + DPNI_TX_SCHED_WEIGHTED_B,
8839 +};
8840 +
8841 +/**
8842 + * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
8843 + * @mode: Scheduling mode
8844 + * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
8845 + * not applicable for 'strict-priority' mode;
8846 + */
8847 +struct dpni_tx_schedule_cfg {
8848 + enum dpni_tx_schedule_mode mode;
8849 + u16 delta_bandwidth;
8850 +};
8851 +
8852 +/**
8853 + * struct dpni_tx_priorities_cfg - Structure representing transmission
8854 + * priorities for DPNI TCs
8855 + * @tc_sched: An array of traffic-classes
8856 + * @prio_group_A: Priority of group A
8857 + * @prio_group_B: Priority of group B
8858 + * @separate_groups: Treat A and B groups as separate
8860 + */
8861 +struct dpni_tx_priorities_cfg {
8862 + struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
8863 + u8 prio_group_A;
8864 + u8 prio_group_B;
8865 + u8 separate_groups;
8866 +};
8867 +
8868 +int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
8869 + u32 cmd_flags,
8870 + u16 token,
8871 + const struct dpni_tx_priorities_cfg *cfg);
8872 +
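+/*
+ * Usage sketch (illustrative): make TC 0 strict priority while TC 1 and
+ * TC 2 share bandwidth 2:1 in weighted group A (weights are relative,
+ * within the 100-10000 range documented above):
+ *
+ *	struct dpni_tx_priorities_cfg prio = { 0 };
+ *
+ *	prio.tc_sched[0].mode = DPNI_TX_SCHED_STRICT_PRIORITY;
+ *	prio.tc_sched[1].mode = DPNI_TX_SCHED_WEIGHTED_A;
+ *	prio.tc_sched[1].delta_bandwidth = 200;
+ *	prio.tc_sched[2].mode = DPNI_TX_SCHED_WEIGHTED_A;
+ *	prio.tc_sched[2].delta_bandwidth = 100;
+ *
+ *	err = dpni_set_tx_priorities(mc_io, 0, token, &prio);
+ */
+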
8873 +/**
8874 * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
8875 * @dist_size: Set the distribution size;
8876 * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
8877 @@ -784,6 +901,108 @@ enum dpni_congestion_point {
8878 };
8879
8880 /**
8881 + * struct dpni_dest_cfg - Structure representing DPNI destination parameters
8882 + * @dest_type: Destination type
8883 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
8884 + * @priority: Priority selection within the DPIO or DPCON channel; valid
8885 + * values are 0-1 or 0-7, depending on the number of priorities
8886 + * in that channel; not relevant for 'DPNI_DEST_NONE' option
8887 + */
8888 +struct dpni_dest_cfg {
8889 + enum dpni_dest dest_type;
8890 + int dest_id;
8891 + u8 priority;
8892 +};
8893 +
8894 +/* DPNI congestion options */
8895 +
8896 +/**
8897 + * CSCN message is written to message_iova once entering a
8898 + * congestion state (see 'threshold_entry')
8899 + */
8900 +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
8901 +/**
8902 + * CSCN message is written to message_iova once exiting a
8903 + * congestion state (see 'threshold_exit')
8904 + */
8905 +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
8906 +/**
8907 + * CSCN write will attempt to allocate into a cache (coherent write);
8908 + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
8909 + */
8910 +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
8911 +/**
8912 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
8913 + * DPIO/DPCON's WQ channel once entering a congestion state
8914 + * (see 'threshold_entry')
8915 + */
8916 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
8917 +/**
8918 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
8919 + * DPIO/DPCON's WQ channel once exiting a congestion state
8920 + * (see 'threshold_exit')
8921 + */
8922 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
8923 +/**
8924 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
8925 + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
8926 + */
8927 +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
8928 +/**
8929 + * This congestion will trigger flow control or priority flow control.
8930 + * This will have effect only if flow control is enabled with
8931 + * dpni_set_link_cfg().
8932 + */
8933 +#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
8934 +
8935 +/**
8936 + * struct dpni_congestion_notification_cfg - congestion notification
8937 + * configuration
8938 + * @units: Units type
8939 + * @threshold_entry: Above this threshold we enter a congestion state.
8940 + *		Set it to '0' to disable it.
8941 + * @threshold_exit: Below this threshold we exit the congestion state.
8942 + * @message_ctx: The context that will be part of the CSCN message
8943 + * @message_iova: I/O virtual address (must be in DMA-able memory),
8944 + * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
8945 + * is contained in 'options'
8946 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
8947 + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
8948 + */
8950 +struct dpni_congestion_notification_cfg {
8951 + enum dpni_congestion_unit units;
8952 + u32 threshold_entry;
8953 + u32 threshold_exit;
8954 + u64 message_ctx;
8955 + u64 message_iova;
8956 + struct dpni_dest_cfg dest_cfg;
8957 + u16 notification_mode;
8958 +};
8959 +
8960 +/* Compose the TC parameter for the dpni_set_congestion_notification()
8961 + * and dpni_get_congestion_notification() functions.
8962 + */
8963 +#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \
8964 + ((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F))
8965 +
8966 +int dpni_set_congestion_notification(
8967 + struct fsl_mc_io *mc_io,
8968 + u32 cmd_flags,
8969 + u16 token,
8970 + enum dpni_queue_type qtype,
8971 + u8 tc_id,
8972 + const struct dpni_congestion_notification_cfg *cfg);
8973 +
8974 +int dpni_get_congestion_notification(
8975 + struct fsl_mc_io *mc_io,
8976 + u32 cmd_flags,
8977 + u16 token,
8978 + enum dpni_queue_type qtype,
8979 + u8 tc_id,
8980 + struct dpni_congestion_notification_cfg *cfg);
8981 +
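+/*
+ * Usage sketch (illustrative; 'cscn_iova' is an assumed 16-byte aligned,
+ * DMA-able address and the enum values come from this header family):
+ * write a CSCN to memory when Rx TC 1 crosses 64 KB of backlog:
+ *
+ *	struct dpni_congestion_notification_cfg cn = {
+ *		.units = DPNI_CONGESTION_UNIT_BYTES,
+ *		.threshold_entry = 64 * 1024,
+ *		.threshold_exit = 32 * 1024,
+ *		.message_iova = cscn_iova,
+ *		.dest_cfg = { .dest_type = DPNI_DEST_NONE },
+ *		.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+ *				     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT,
+ *	};
+ *
+ *	err = dpni_set_congestion_notification(mc_io, 0, token, DPNI_QUEUE_RX,
+ *					       DPNI_BUILD_CH_TC(0, 1), &cn);
+ */
+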
8982 +/**
8983 * struct dpni_taildrop - Structure representing the taildrop
8984 * @enable: Indicates whether the taildrop is active or not.
8985 * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
8986 @@ -829,4 +1048,124 @@ struct dpni_rule_cfg {
8987 u8 key_size;
8988 };
8989
8990 +int dpni_get_api_version(struct fsl_mc_io *mc_io,
8991 + u32 cmd_flags,
8992 + u16 *major_ver,
8993 + u16 *minor_ver);
8994 +
8995 +int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
8996 + u32 cmd_flags,
8997 + u16 token,
8998 + const struct dpni_rule_cfg *cfg,
8999 + u8 tc_id,
9000 + u16 index);
9001 +
9002 +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
9003 + u32 cmd_flags,
9004 + u16 token,
9005 + const struct dpni_rule_cfg *cfg);
9006 +
9007 +int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
9008 + u32 cmd_flags,
9009 + u16 token);
9010 +
9011 +/**
9012 + * Discard matching traffic. If set, this takes precedence over any other
9013 + * configuration and matching traffic is always discarded.
9014 + */
9015 +#define DPNI_FS_OPT_DISCARD            0x1
9016 +
9017 +/**
9018 + * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
9019 + * override the FLC value set per queue.
9020 + * For more details check the Frame Descriptor section in the hardware
9021 + * documentation.
9022 + */
9023 +#define DPNI_FS_OPT_SET_FLC 0x2
9024 +
9025 +/**
9026 + * Indicates whether the 6 least significant bits of FLC are used for stash
9027 + * control. If set, the 6 least significant bits of the FLC value are
9028 + * interpreted as follows:
9029 + *     - bits 0-1: indicate the number of 64 byte units of context that are
9030 + *     stashed. FLC value is interpreted as a memory address in this case,
9031 + *     excluding the 6 LS bits.
9032 + *     - bits 2-3: indicate the number of 64 byte units of frame annotation
9033 + *     to be stashed. Annotation is placed at FD[ADDR].
9034 + *     - bits 4-5: indicate the number of 64 byte units of frame data to be
9035 + *     stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
9036 + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
9037 + */
9038 +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
9039 +
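+/*
+ * Usage sketch (illustrative; 'ctx_iova' is an assumed 64-byte aligned
+ * stash address): request one 64-byte unit of frame data to be stashed:
+ *
+ *	u64 flc = ctx_iova | (1 << 4);	// bits 4-5 = 01: one data unit
+ *	u16 options = DPNI_FS_OPT_SET_FLC | DPNI_FS_OPT_SET_STASH_CONTROL;
+ *
+ * These values go in the flc and options members of
+ * struct dpni_fs_action_cfg below.
+ */
+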
9040 +/**
9041 + * struct dpni_fs_action_cfg - Action configuration for table look-up
9042 + * @flc: FLC value for traffic matching this rule. Please check the
9043 + * Frame Descriptor section in the hardware documentation for
9044 + * more information.
9045 + * @flow_id: Identifies the Rx queue used for matching traffic. Supported
9046 + * values are in range 0 to num_queue-1.
9047 + * @options: Any combination of DPNI_FS_OPT_ values.
9048 + */
9049 +struct dpni_fs_action_cfg {
9050 + u64 flc;
9051 + u16 flow_id;
9052 + u16 options;
9053 +};
9054 +
9055 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
9056 + u32 cmd_flags,
9057 + u16 token,
9058 + u8 tc_id,
9059 + u16 index,
9060 + const struct dpni_rule_cfg *cfg,
9061 + const struct dpni_fs_action_cfg *action);
9062 +
9063 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
9064 + u32 cmd_flags,
9065 + u16 token,
9066 + u8 tc_id,
9067 + const struct dpni_rule_cfg *cfg);
9068 +
9069 +/**
9070 + * When used as the queue_idx in a dpni_set_rx_dist_default_queue() call,
9071 + * this signals the DPNI to drop all unclassified frames
9072 + */
9073 +#define DPNI_FS_MISS_DROP ((uint16_t)-1)
9074 +
9075 +/**
9076 + * struct dpni_rx_dist_cfg - distribution configuration
9077 + * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
9078 + * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
9079 + * 512,768,896,1024
9080 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
9081 + * the extractions to be used for the distribution key by calling
9082 + *		dpni_prepare_key_cfg(); relevant only when enable != 0,
9083 + *		otherwise it can be '0'
9084 + * @enable: enable/disable the distribution.
9085 + * @tc: TC id for which distribution is set
9086 + * @fs_miss_flow_id: when a packet misses all the rules of the flow steering
9087 + *		table and hash distribution is disabled, it will be put into
9088 + *		this queue; use DPNI_FS_MISS_DROP to drop such frames. The
9089 + *		value of this field is used only when flow steering
9090 + *		distribution is enabled and hash distribution is disabled
9091 + */
9092 +struct dpni_rx_dist_cfg {
9093 + u16 dist_size;
9094 + u64 key_cfg_iova;
9095 + u8 enable;
9096 + u8 tc;
9097 + u16 fs_miss_flow_id;
9098 +};
9099 +
9100 +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
9101 + u32 cmd_flags,
9102 + u16 token,
9103 + const struct dpni_rx_dist_cfg *cfg);
9104 +
9105 +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
9106 + u32 cmd_flags,
9107 + u16 token,
9108 + const struct dpni_rx_dist_cfg *cfg);
9109 +
9110 #endif /* __FSL_DPNI_H */
9111 --- a/drivers/staging/fsl-dpaa2/ethernet/net.h
9112 +++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
9113 @@ -1,33 +1,5 @@
9114 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
9115 /* Copyright 2013-2015 Freescale Semiconductor Inc.
9116 - *
9117 - * Redistribution and use in source and binary forms, with or without
9118 - * modification, are permitted provided that the following conditions are met:
9119 - * * Redistributions of source code must retain the above copyright
9120 - * notice, this list of conditions and the following disclaimer.
9121 - * * Redistributions in binary form must reproduce the above copyright
9122 - * notice, this list of conditions and the following disclaimer in the
9123 - * documentation and/or other materials provided with the distribution.
9124 - * * Neither the name of the above-listed copyright holders nor the
9125 - * names of any contributors may be used to endorse or promote products
9126 - * derived from this software without specific prior written permission.
9127 - *
9128 - *
9129 - * ALTERNATIVELY, this software may be distributed under the terms of the
9130 - * GNU General Public License ("GPL") as published by the Free Software
9131 - * Foundation, either version 2 of that License or (at your option) any
9132 - * later version.
9133 - *
9134 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9135 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9136 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9137 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9138 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9139 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9140 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9141 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9142 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9143 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9144 - * POSSIBILITY OF SUCH DAMAGE.
9145 */
9146 #ifndef __FSL_NET_H
9147 #define __FSL_NET_H