1 From 76e25c1f46456416ba5358be8a0677f1ab8196b6 Mon Sep 17 00:00:00 2001
2 From: Maxime Chevallier <maxime.chevallier@bootlin.com>
3 Date: Fri, 4 Nov 2022 18:41:48 +0100
4 Subject: [PATCH] net: ipqess: introduce the Qualcomm IPQESS driver
5
6 The Qualcomm IPQESS controller is a simple 1G Ethernet controller found
7 on the IPQ4019 chip. One specificity of this controller is that the
8 IPQ4019 platform that includes it also has an internal switch, based on
9 the QCA8K IP.
10
11 It is connected to that switch through an internal link, and doesn't
12 directly expose any external interface, hence it only supports
13 PHY_INTERFACE_MODE_INTERNAL for now.
14
15 It has 16 TX and 8 RX hardware queues, with a very basic RSS fanout
16 configured at init time.
17
18 Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
19 ---
20 MAINTAINERS | 7 +
21 drivers/net/ethernet/qualcomm/Kconfig | 11 +
22 drivers/net/ethernet/qualcomm/Makefile | 2 +
23 drivers/net/ethernet/qualcomm/ipqess/Makefile | 8 +
24 drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 1246 +++++++++++++++++
25 drivers/net/ethernet/qualcomm/ipqess/ipqess.h | 518 +++++++
26 .../ethernet/qualcomm/ipqess/ipqess_ethtool.c | 164 +++
27 7 files changed, 1956 insertions(+)
28 create mode 100644 drivers/net/ethernet/qualcomm/ipqess/Makefile
29 create mode 100644 drivers/net/ethernet/qualcomm/ipqess/ipqess.c
30 create mode 100644 drivers/net/ethernet/qualcomm/ipqess/ipqess.h
31 create mode 100644 drivers/net/ethernet/qualcomm/ipqess/ipqess_ethtool.c
32
33 --- a/MAINTAINERS
34 +++ b/MAINTAINERS
35 @@ -17064,6 +17064,13 @@ L: netdev@vger.kernel.org
36 S: Maintained
37 F: drivers/net/ethernet/qualcomm/emac/
38
39 +QUALCOMM IPQESS ETHERNET DRIVER
40 +M: Maxime Chevallier <maxime.chevallier@bootlin.com>
41 +L: netdev@vger.kernel.org
42 +S: Maintained
43 +F: Documentation/devicetree/bindings/net/qcom,ipq4019-ess-edma.yaml
44 +F: drivers/net/ethernet/qualcomm/ipqess/
45 +
46 QUALCOMM ETHQOS ETHERNET DRIVER
47 M: Vinod Koul <vkoul@kernel.org>
48 R: Bhupesh Sharma <bhupesh.sharma@linaro.org>
49 --- a/drivers/net/ethernet/qualcomm/Kconfig
50 +++ b/drivers/net/ethernet/qualcomm/Kconfig
51 @@ -60,6 +60,17 @@ config QCOM_EMAC
52 low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
53 Precision Clock Synchronization Protocol.
54
55 +config QCOM_IPQ4019_ESS_EDMA
56 + tristate "Qualcomm Atheros IPQ4019 ESS EDMA support"
57 + depends on (OF && ARCH_QCOM) || COMPILE_TEST
58 + select PHYLINK
59 + help
60 + This driver supports the Qualcomm Atheros IPQ40xx built-in
61 + ESS EDMA Ethernet controller.
62 +
63 + To compile this driver as a module, choose M here: the
64 + module will be called ipqess.
65 +
66 source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
67
68 endif # NET_VENDOR_QUALCOMM
69 --- a/drivers/net/ethernet/qualcomm/Makefile
70 +++ b/drivers/net/ethernet/qualcomm/Makefile
71 @@ -11,4 +11,6 @@ qcauart-objs := qca_uart.o
72
73 obj-y += emac/
74
75 +obj-$(CONFIG_QCOM_IPQ4019_ESS_EDMA) += ipqess/
76 +
77 obj-$(CONFIG_RMNET) += rmnet/
78 --- /dev/null
79 +++ b/drivers/net/ethernet/qualcomm/ipqess/Makefile
80 @@ -0,0 +1,8 @@
81 +# SPDX-License-Identifier: GPL-2.0-only
82 +#
83 +# Makefile for the IPQ ESS driver
84 +#
85 +
86 +obj-$(CONFIG_QCOM_IPQ4019_ESS_EDMA) += ipq_ess.o
87 +
88 +ipq_ess-objs := ipqess.o ipqess_ethtool.o
89 --- /dev/null
90 +++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
91 @@ -0,0 +1,1246 @@
92 +// SPDX-License-Identifier: GPL-2.0 OR ISC
93 +/* Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
94 + * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
95 + * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
96 + * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
97 + * Copyright (c) 2021 - 2022, Maxime Chevallier <maxime.chevallier@bootlin.com>
98 + *
99 + */
100 +
101 +#include <linux/bitfield.h>
102 +#include <linux/clk.h>
103 +#include <linux/if_vlan.h>
104 +#include <linux/interrupt.h>
105 +#include <linux/module.h>
106 +#include <linux/of.h>
107 +#include <linux/of_device.h>
108 +#include <linux/of_mdio.h>
109 +#include <linux/of_net.h>
110 +#include <linux/phylink.h>
111 +#include <linux/platform_device.h>
112 +#include <linux/reset.h>
113 +#include <linux/skbuff.h>
114 +#include <linux/vmalloc.h>
115 +#include <net/checksum.h>
116 +#include <net/ip6_checksum.h>
117 +
118 +#include "ipqess.h"
119 +
120 +#define IPQESS_RRD_SIZE 16
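+/* The ring size must be a power of two for this wrap-around to work */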
121 +#define IPQESS_NEXT_IDX(X, Y) (((X) + 1) & ((Y) - 1))
122 +#define IPQESS_TX_DMA_BUF_LEN 0x3fff
123 +
124 +static void ipqess_w32(struct ipqess *ess, u32 reg, u32 val)
125 +{
126 + writel(val, ess->hw_addr + reg);
127 +}
128 +
129 +static u32 ipqess_r32(struct ipqess *ess, u16 reg)
130 +{
131 + return readl(ess->hw_addr + reg);
132 +}
133 +
134 +static void ipqess_m32(struct ipqess *ess, u32 mask, u32 val, u16 reg)
135 +{
136 + u32 _val = ipqess_r32(ess, reg);
137 +
138 + _val &= ~mask;
139 + _val |= val;
140 +
141 + ipqess_w32(ess, reg, _val);
142 +}
143 +
144 +void ipqess_update_hw_stats(struct ipqess *ess)
145 +{
146 + u32 *p;
147 + u32 stat;
148 + int i;
149 +
150 + lockdep_assert_held(&ess->stats_lock);
151 +
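+ /* The fields of struct ipqess_statistics are laid out in the same
+ * order as the hardware counter registers: TX packet counts, TX
+ * byte counts, RX packet counts, then RX byte counts, so a linear
+ * walk with a u32 pointer fills them all.
+ */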
152 + p = (u32 *)&ess->ipqess_stats;
153 + for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
154 + stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i));
155 + *p += stat;
156 + p++;
157 + }
158 +
159 + for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
160 + stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
161 + *p += stat;
162 + p++;
163 + }
164 +
165 + for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
166 + stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
167 + *p += stat;
168 + p++;
169 + }
170 +
171 + for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
172 + stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
173 + *p += stat;
174 + p++;
175 + }
176 +}
177 +
178 +static int ipqess_tx_ring_alloc(struct ipqess *ess)
179 +{
180 + struct device *dev = &ess->pdev->dev;
181 + int i;
182 +
183 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
184 + struct ipqess_tx_ring *tx_ring = &ess->tx_ring[i];
185 + size_t size;
186 + u32 idx;
187 +
188 + tx_ring->ess = ess;
189 + tx_ring->ring_id = i;
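+ /* The 4 netdev TX queues are backed by hardware TX queues
+ * 0, 4, 8 and 12 out of the 16 available (stride of 4).
+ */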
190 + tx_ring->idx = i * 4;
191 + tx_ring->count = IPQESS_TX_RING_SIZE;
192 + tx_ring->nq = netdev_get_tx_queue(ess->netdev, i);
193 +
194 + size = sizeof(struct ipqess_buf) * IPQESS_TX_RING_SIZE;
195 + tx_ring->buf = devm_kzalloc(dev, size, GFP_KERNEL);
196 + if (!tx_ring->buf)
197 + return -ENOMEM;
198 +
199 + size = sizeof(struct ipqess_tx_desc) * IPQESS_TX_RING_SIZE;
200 + tx_ring->hw_desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
201 + GFP_KERNEL);
202 + if (!tx_ring->hw_desc)
203 + return -ENOMEM;
204 +
205 + ipqess_w32(ess, IPQESS_REG_TPD_BASE_ADDR_Q(tx_ring->idx),
206 + (u32)tx_ring->dma);
207 +
208 + idx = ipqess_r32(ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
209 + idx >>= IPQESS_TPD_CONS_IDX_SHIFT; /* need u32 here */
210 + idx &= 0xffff;
211 + tx_ring->head = idx;
212 + tx_ring->tail = idx;
213 +
214 + ipqess_m32(ess, IPQESS_TPD_PROD_IDX_MASK << IPQESS_TPD_PROD_IDX_SHIFT,
215 + idx, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
216 + ipqess_w32(ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx), idx);
217 + ipqess_w32(ess, IPQESS_REG_TPD_RING_SIZE, IPQESS_TX_RING_SIZE);
218 + }
219 +
220 + return 0;
221 +}
222 +
223 +static int ipqess_tx_unmap_and_free(struct device *dev, struct ipqess_buf *buf)
224 +{
225 + int len = 0;
226 +
227 + if (buf->flags & IPQESS_DESC_SINGLE)
228 + dma_unmap_single(dev, buf->dma, buf->length, DMA_TO_DEVICE);
229 + else if (buf->flags & IPQESS_DESC_PAGE)
230 + dma_unmap_page(dev, buf->dma, buf->length, DMA_TO_DEVICE);
231 +
232 + if (buf->flags & IPQESS_DESC_LAST) {
233 + len = buf->skb->len;
234 + dev_kfree_skb_any(buf->skb);
235 + }
236 +
237 + buf->flags = 0;
238 +
239 + return len;
240 +}
241 +
242 +static void ipqess_tx_ring_free(struct ipqess *ess)
243 +{
244 + int i;
245 +
246 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
247 + int j;
248 +
249 + if (ess->tx_ring[i].hw_desc)
250 + continue;
251 +
252 + for (j = 0; j < IPQESS_TX_RING_SIZE; j++) {
253 + struct ipqess_buf *buf = &ess->tx_ring[i].buf[j];
254 +
255 + ipqess_tx_unmap_and_free(&ess->pdev->dev, buf);
256 + }
257 +
258 + ess->tx_ring[i].buf = NULL;
259 + }
260 +}
261 +
262 +static int ipqess_rx_buf_prepare(struct ipqess_buf *buf,
263 + struct ipqess_rx_ring *rx_ring)
264 +{
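+ /* The hardware writes the RX return descriptor (RRD) into the
+ * head of the buffer itself, so clear that area first.
+ */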
265 + memset(buf->skb->data, 0, sizeof(struct ipqess_rx_desc));
266 +
267 + buf->dma = dma_map_single(rx_ring->ppdev, buf->skb->data,
268 + IPQESS_RX_HEAD_BUFF_SIZE, DMA_FROM_DEVICE);
269 + if (dma_mapping_error(rx_ring->ppdev, buf->dma)) {
270 + dev_kfree_skb_any(buf->skb);
271 + buf->skb = NULL;
272 + return -EFAULT;
273 + }
274 +
275 + buf->length = IPQESS_RX_HEAD_BUFF_SIZE;
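+ /* An RFD ring slot carries nothing but the DMA address of the
+ * buffer the hardware should fill next.
+ */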
276 + rx_ring->hw_desc[rx_ring->head] = (struct ipqess_rx_desc *)buf->dma;
277 + rx_ring->head = (rx_ring->head + 1) % IPQESS_RX_RING_SIZE;
278 +
279 + ipqess_m32(rx_ring->ess, IPQESS_RFD_PROD_IDX_BITS,
280 + (rx_ring->head + IPQESS_RX_RING_SIZE - 1) % IPQESS_RX_RING_SIZE,
281 + IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
282 +
283 + return 0;
284 +}
285 +
286 +/* locking is handled by the caller */
287 +static int ipqess_rx_buf_alloc_napi(struct ipqess_rx_ring *rx_ring)
288 +{
289 + struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
290 +
291 + buf->skb = napi_alloc_skb(&rx_ring->napi_rx, IPQESS_RX_HEAD_BUFF_SIZE);
292 + if (!buf->skb)
293 + return -ENOMEM;
294 +
295 + return ipqess_rx_buf_prepare(buf, rx_ring);
296 +}
297 +
298 +static int ipqess_rx_buf_alloc(struct ipqess_rx_ring *rx_ring)
299 +{
300 + struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
301 +
302 + buf->skb = netdev_alloc_skb_ip_align(rx_ring->ess->netdev,
303 + IPQESS_RX_HEAD_BUFF_SIZE);
304 +
305 + if (!buf->skb)
306 + return -ENOMEM;
307 +
308 + return ipqess_rx_buf_prepare(buf, rx_ring);
309 +}
310 +
311 +static void ipqess_refill_work(struct work_struct *work)
312 +{
313 + struct ipqess_rx_ring_refill *rx_refill = container_of(work,
314 + struct ipqess_rx_ring_refill, refill_work);
315 + struct ipqess_rx_ring *rx_ring = rx_refill->rx_ring;
316 + int refill = 0;
317 +
318 + /* don't let this loop by accident. */
319 + while (atomic_dec_and_test(&rx_ring->refill_count)) {
320 + napi_disable(&rx_ring->napi_rx);
321 + if (ipqess_rx_buf_alloc(rx_ring)) {
322 + refill++;
323 + dev_dbg(rx_ring->ppdev,
324 + "Not all buffers were reallocated");
325 + }
326 + napi_enable(&rx_ring->napi_rx);
327 + }
328 +
329 + if (atomic_add_return(refill, &rx_ring->refill_count))
330 + schedule_work(&rx_refill->refill_work);
331 +}
332 +
333 +static int ipqess_rx_ring_alloc(struct ipqess *ess)
334 +{
335 + int i;
336 +
337 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
338 + int j;
339 +
340 + ess->rx_ring[i].ess = ess;
341 + ess->rx_ring[i].ppdev = &ess->pdev->dev;
342 + ess->rx_ring[i].ring_id = i;
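+ /* The 4 netdev RX queues are backed by hardware RX queues
+ * 0, 2, 4 and 6 out of the 8 available (stride of 2), which
+ * matches the RSS indirection table programmed in
+ * ipqess_hw_init().
+ */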
343 + ess->rx_ring[i].idx = i * 2;
344 +
345 + ess->rx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
346 + sizeof(struct ipqess_buf) * IPQESS_RX_RING_SIZE,
347 + GFP_KERNEL);
348 +
349 + if (!ess->rx_ring[i].buf)
350 + return -ENOMEM;
351 +
352 + ess->rx_ring[i].hw_desc =
353 + dmam_alloc_coherent(&ess->pdev->dev,
354 + sizeof(struct ipqess_rx_desc) * IPQESS_RX_RING_SIZE,
355 + &ess->rx_ring[i].dma, GFP_KERNEL);
356 +
357 + if (!ess->rx_ring[i].hw_desc)
358 + return -ENOMEM;
359 +
360 + for (j = 0; j < IPQESS_RX_RING_SIZE; j++)
361 + if (ipqess_rx_buf_alloc(&ess->rx_ring[i]) < 0)
362 + return -ENOMEM;
363 +
364 + ess->rx_refill[i].rx_ring = &ess->rx_ring[i];
365 + INIT_WORK(&ess->rx_refill[i].refill_work, ipqess_refill_work);
366 +
367 + ipqess_w32(ess, IPQESS_REG_RFD_BASE_ADDR_Q(ess->rx_ring[i].idx),
368 + (u32)(ess->rx_ring[i].dma));
369 + }
370 +
371 + ipqess_w32(ess, IPQESS_REG_RX_DESC0,
372 + (IPQESS_RX_HEAD_BUFF_SIZE << IPQESS_RX_BUF_SIZE_SHIFT) |
373 + (IPQESS_RX_RING_SIZE << IPQESS_RFD_RING_SIZE_SHIFT));
374 +
375 + return 0;
376 +}
377 +
378 +static void ipqess_rx_ring_free(struct ipqess *ess)
379 +{
380 + int i;
381 +
382 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
383 + int j;
384 +
385 + cancel_work_sync(&ess->rx_refill[i].refill_work);
386 + atomic_set(&ess->rx_ring[i].refill_count, 0);
387 +
388 + for (j = 0; j < IPQESS_RX_RING_SIZE; j++) {
389 + dma_unmap_single(&ess->pdev->dev,
390 + ess->rx_ring[i].buf[j].dma,
391 + ess->rx_ring[i].buf[j].length,
392 + DMA_FROM_DEVICE);
393 + dev_kfree_skb_any(ess->rx_ring[i].buf[j].skb);
394 + }
395 + }
396 +}
397 +
398 +static struct net_device_stats *ipqess_get_stats(struct net_device *netdev)
399 +{
400 + struct ipqess *ess = netdev_priv(netdev);
401 +
402 + spin_lock(&ess->stats_lock);
403 + ipqess_update_hw_stats(ess);
404 + spin_unlock(&ess->stats_lock);
405 +
406 + return &ess->stats;
407 +}
408 +
409 +static int ipqess_rx_poll(struct ipqess_rx_ring *rx_ring, int budget)
410 +{
411 + u32 length = 0, num_desc, tail, rx_ring_tail;
412 + int done = 0;
413 +
414 + rx_ring_tail = rx_ring->tail;
415 +
416 + tail = ipqess_r32(rx_ring->ess, IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
417 + tail >>= IPQESS_RFD_CONS_IDX_SHIFT;
418 + tail &= IPQESS_RFD_CONS_IDX_MASK;
419 +
420 + while (done < budget) {
421 + struct ipqess_rx_desc *rd;
422 + struct sk_buff *skb;
423 +
424 + if (rx_ring_tail == tail)
425 + break;
426 +
427 + dma_unmap_single(rx_ring->ppdev,
428 + rx_ring->buf[rx_ring_tail].dma,
429 + rx_ring->buf[rx_ring_tail].length,
430 + DMA_FROM_DEVICE);
431 +
432 + skb = xchg(&rx_ring->buf[rx_ring_tail].skb, NULL);
433 + rd = (struct ipqess_rx_desc *)skb->data;
434 + rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
435 +
436 + /* Check if RRD is valid */
437 + if (!(rd->rrd7 & cpu_to_le16(IPQESS_RRD_DESC_VALID))) {
438 + num_desc = 1;
439 + dev_kfree_skb_any(skb);
440 + goto skip;
441 + }
442 +
443 + num_desc = le16_to_cpu(rd->rrd1) & IPQESS_RRD_NUM_RFD_MASK;
444 + length = le16_to_cpu(rd->rrd6) & IPQESS_RRD_PKT_SIZE_MASK;
445 +
446 + skb_reserve(skb, IPQESS_RRD_SIZE);
447 + if (num_desc > 1) {
448 + struct sk_buff *skb_prev = NULL;
449 + int size_remaining;
450 + int i;
451 +
452 + skb->data_len = 0;
453 + skb->tail += (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
454 + skb->len = length;
455 + skb->truesize = length;
456 + size_remaining = length - (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
457 +
458 + for (i = 1; i < num_desc; i++) {
459 + struct sk_buff *skb_temp = rx_ring->buf[rx_ring_tail].skb;
460 +
461 + dma_unmap_single(rx_ring->ppdev,
462 + rx_ring->buf[rx_ring_tail].dma,
463 + rx_ring->buf[rx_ring_tail].length,
464 + DMA_FROM_DEVICE);
465 +
466 + skb_put(skb_temp, min(size_remaining, IPQESS_RX_HEAD_BUFF_SIZE));
467 + if (skb_prev)
468 + skb_prev->next = rx_ring->buf[rx_ring_tail].skb;
469 + else
470 + skb_shinfo(skb)->frag_list = rx_ring->buf[rx_ring_tail].skb;
471 + skb_prev = rx_ring->buf[rx_ring_tail].skb;
472 + rx_ring->buf[rx_ring_tail].skb->next = NULL;
473 +
474 + skb->data_len += rx_ring->buf[rx_ring_tail].skb->len;
475 + size_remaining -= rx_ring->buf[rx_ring_tail].skb->len;
476 +
477 + rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
478 + }
479 +
480 + } else {
481 + skb_put(skb, length);
482 + }
483 +
484 + skb->dev = rx_ring->ess->netdev;
485 + skb->protocol = eth_type_trans(skb, rx_ring->ess->netdev);
486 + skb_record_rx_queue(skb, rx_ring->ring_id);
487 +
488 + if (rd->rrd6 & cpu_to_le16(IPQESS_RRD_CSUM_FAIL_MASK))
489 + skb_checksum_none_assert(skb);
490 + else
491 + skb->ip_summed = CHECKSUM_UNNECESSARY;
492 +
493 + if (rd->rrd7 & cpu_to_le16(IPQESS_RRD_CVLAN))
494 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
495 + le16_to_cpu(rd->rrd4));
496 + else if (rd->rrd1 & cpu_to_le16(IPQESS_RRD_SVLAN))
497 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
498 + le16_to_cpu(rd->rrd4));
499 +
500 + napi_gro_receive(&rx_ring->napi_rx, skb);
501 +
502 + rx_ring->ess->stats.rx_packets++;
503 + rx_ring->ess->stats.rx_bytes += length;
504 +
505 + done++;
506 +skip:
507 +
508 + num_desc += atomic_xchg(&rx_ring->refill_count, 0);
509 + while (num_desc) {
510 + if (ipqess_rx_buf_alloc_napi(rx_ring)) {
511 + num_desc = atomic_add_return(num_desc,
512 + &rx_ring->refill_count);
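+ /* Roughly 4/7 of the ring is missing buffers: give
+ * up refilling from NAPI context and defer to the
+ * refill workqueue.
+ */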
513 + if (num_desc >= DIV_ROUND_UP(IPQESS_RX_RING_SIZE * 4, 7))
514 + schedule_work(&rx_ring->ess->rx_refill[rx_ring->ring_id].refill_work);
515 + break;
516 + }
517 + num_desc--;
518 + }
519 + }
520 +
521 + ipqess_w32(rx_ring->ess, IPQESS_REG_RX_SW_CONS_IDX_Q(rx_ring->idx),
522 + rx_ring_tail);
523 + rx_ring->tail = rx_ring_tail;
524 +
525 + return done;
526 +}
527 +
528 +static int ipqess_tx_complete(struct ipqess_tx_ring *tx_ring, int budget)
529 +{
530 + int total = 0, ret;
531 + int done = 0;
532 + u32 tail;
533 +
534 + tail = ipqess_r32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
535 + tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
536 + tail &= IPQESS_TPD_CONS_IDX_MASK;
537 +
538 + do {
539 + ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
540 + &tx_ring->buf[tx_ring->tail]);
541 + tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
542 +
543 + total += ret;
544 + } while ((++done < budget) && (tx_ring->tail != tail));
545 +
546 + ipqess_w32(tx_ring->ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
547 + tx_ring->tail);
548 +
549 + if (netif_tx_queue_stopped(tx_ring->nq)) {
550 + netdev_dbg(tx_ring->ess->netdev, "waking up tx queue %d\n",
551 + tx_ring->idx);
552 + netif_tx_wake_queue(tx_ring->nq);
553 + }
554 +
555 + netdev_tx_completed_queue(tx_ring->nq, done, total);
556 +
557 + return done;
558 +}
559 +
560 +static int ipqess_tx_napi(struct napi_struct *napi, int budget)
561 +{
562 + struct ipqess_tx_ring *tx_ring = container_of(napi, struct ipqess_tx_ring,
563 + napi_tx);
564 + int work_done = 0;
565 + u32 tx_status;
566 +
567 + tx_status = ipqess_r32(tx_ring->ess, IPQESS_REG_TX_ISR);
568 + tx_status &= BIT(tx_ring->idx);
569 +
570 + work_done = ipqess_tx_complete(tx_ring, budget);
571 +
572 + ipqess_w32(tx_ring->ess, IPQESS_REG_TX_ISR, tx_status);
573 +
574 + if (likely(work_done < budget)) {
575 + if (napi_complete_done(napi, work_done))
576 + ipqess_w32(tx_ring->ess,
577 + IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
578 + }
579 +
580 + return work_done;
581 +}
582 +
583 +static int ipqess_rx_napi(struct napi_struct *napi, int budget)
584 +{
585 + struct ipqess_rx_ring *rx_ring = container_of(napi, struct ipqess_rx_ring,
586 + napi_rx);
587 + struct ipqess *ess = rx_ring->ess;
588 + u32 rx_mask = BIT(rx_ring->idx);
589 + int remaining_budget = budget;
590 + int rx_done;
591 + u32 status;
592 +
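+ /* Ack the RX IRQ, poll, then re-read the ISR: packets that
+ * arrived while the ring was being processed are picked up by
+ * the next iteration instead of waiting for a new interrupt.
+ */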
593 + do {
594 + ipqess_w32(ess, IPQESS_REG_RX_ISR, rx_mask);
595 + rx_done = ipqess_rx_poll(rx_ring, remaining_budget);
596 + remaining_budget -= rx_done;
597 +
598 + status = ipqess_r32(ess, IPQESS_REG_RX_ISR);
599 + } while (remaining_budget > 0 && (status & rx_mask));
600 +
601 + if (remaining_budget <= 0)
602 + return budget;
603 +
604 + if (napi_complete_done(napi, budget - remaining_budget))
605 + ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx), 0x1);
606 +
607 + return budget - remaining_budget;
608 +}
609 +
610 +static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
611 +{
612 + struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *)priv;
613 +
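+ /* Mask this queue's IRQ while NAPI runs; ipqess_tx_napi()
+ * unmasks it again once the ring has been serviced.
+ */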
614 + if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
615 + __napi_schedule(&tx_ring->napi_tx);
616 + ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
617 + 0x0);
618 + }
619 +
620 + return IRQ_HANDLED;
621 +}
622 +
623 +static irqreturn_t ipqess_interrupt_rx(int irq, void *priv)
624 +{
625 + struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *)priv;
626 +
627 + if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
628 + __napi_schedule(&rx_ring->napi_rx);
629 + ipqess_w32(rx_ring->ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
630 + 0x0);
631 + }
632 +
633 + return IRQ_HANDLED;
634 +}
635 +
636 +static void ipqess_irq_enable(struct ipqess *ess)
637 +{
638 + int i;
639 +
640 + ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
641 + ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
642 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
643 + ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 1);
644 + ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 1);
645 + }
646 +}
647 +
648 +static void ipqess_irq_disable(struct ipqess *ess)
649 +{
650 + int i;
651 +
652 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
653 + ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 0);
654 + ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 0);
655 + }
656 +}
657 +
658 +static int __init ipqess_init(struct net_device *netdev)
659 +{
660 + struct ipqess *ess = netdev_priv(netdev);
661 + struct device_node *of_node = ess->pdev->dev.of_node;
662 + int ret;
663 +
664 + ret = of_get_ethdev_address(of_node, netdev);
665 + if (ret)
666 + eth_hw_addr_random(netdev);
667 +
668 + return phylink_of_phy_connect(ess->phylink, of_node, 0);
669 +}
670 +
671 +static void ipqess_uninit(struct net_device *netdev)
672 +{
673 + struct ipqess *ess = netdev_priv(netdev);
674 +
675 + phylink_disconnect_phy(ess->phylink);
676 +}
677 +
678 +static int ipqess_open(struct net_device *netdev)
679 +{
680 + struct ipqess *ess = netdev_priv(netdev);
681 + int i, err;
682 +
683 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
684 + int qid;
685 +
686 + qid = ess->tx_ring[i].idx;
687 + err = devm_request_irq(&netdev->dev, ess->tx_irq[qid],
688 + ipqess_interrupt_tx, 0,
689 + ess->tx_irq_names[qid],
690 + &ess->tx_ring[i]);
691 + if (err)
692 + return err;
693 +
694 + qid = ess->rx_ring[i].idx;
695 + err = devm_request_irq(&netdev->dev, ess->rx_irq[qid],
696 + ipqess_interrupt_rx, 0,
697 + ess->rx_irq_names[qid],
698 + &ess->rx_ring[i]);
699 + if (err)
700 + return err;
701 +
702 + napi_enable(&ess->tx_ring[i].napi_tx);
703 + napi_enable(&ess->rx_ring[i].napi_rx);
704 + }
705 +
706 + ipqess_irq_enable(ess);
707 + phylink_start(ess->phylink);
708 + netif_tx_start_all_queues(netdev);
709 +
710 + return 0;
711 +}
712 +
713 +static int ipqess_stop(struct net_device *netdev)
714 +{
715 + struct ipqess *ess = netdev_priv(netdev);
716 + int i;
717 +
718 + netif_tx_stop_all_queues(netdev);
719 + phylink_stop(ess->phylink);
720 + ipqess_irq_disable(ess);
721 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
722 + napi_disable(&ess->tx_ring[i].napi_tx);
723 + napi_disable(&ess->rx_ring[i].napi_rx);
724 + }
725 +
726 + return 0;
727 +}
728 +
729 +static int ipqess_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
730 +{
731 + struct ipqess *ess = netdev_priv(netdev);
732 +
733 + return phylink_mii_ioctl(ess->phylink, ifr, cmd);
734 +}
735 +
736 +static u16 ipqess_tx_desc_available(struct ipqess_tx_ring *tx_ring)
737 +{
738 + u16 count = 0;
739 +
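+ /* One slot is always left unused so that a full ring (head just
+ * behind tail) can be distinguished from an empty one (head ==
+ * tail), hence the -1 below.
+ */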
740 + if (tx_ring->tail <= tx_ring->head)
741 + count = IPQESS_TX_RING_SIZE;
742 +
743 + count += tx_ring->tail - tx_ring->head - 1;
744 +
745 + return count;
746 +}
747 +
748 +static int ipqess_cal_txd_req(struct sk_buff *skb)
749 +{
750 + int tpds;
751 +
752 + /* one TPD for the header, and one for each fragment */
753 + tpds = 1 + skb_shinfo(skb)->nr_frags;
754 + if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
755 + /* for LSOv2 one extra TPD is needed */
756 + tpds++;
757 + }
758 +
759 + return tpds;
760 +}
761 +
762 +static struct ipqess_buf *ipqess_get_tx_buffer(struct ipqess_tx_ring *tx_ring,
763 + struct ipqess_tx_desc *desc)
764 +{
765 + return &tx_ring->buf[desc - tx_ring->hw_desc];
766 +}
767 +
768 +static struct ipqess_tx_desc *ipqess_tx_desc_next(struct ipqess_tx_ring *tx_ring)
769 +{
770 + struct ipqess_tx_desc *desc;
771 +
772 + desc = &tx_ring->hw_desc[tx_ring->head];
773 + tx_ring->head = IPQESS_NEXT_IDX(tx_ring->head, tx_ring->count);
774 +
775 + return desc;
776 +}
777 +
778 +static void ipqess_rollback_tx(struct ipqess *eth,
779 + struct ipqess_tx_desc *first_desc, int ring_id)
780 +{
781 + struct ipqess_tx_ring *tx_ring = &eth->tx_ring[ring_id];
782 + struct ipqess_tx_desc *desc = NULL;
783 + struct ipqess_buf *buf;
784 + u16 start_index, index;
785 +
786 + start_index = first_desc - tx_ring->hw_desc;
787 +
788 + index = start_index;
789 + while (index != tx_ring->head) {
790 + desc = &tx_ring->hw_desc[index];
791 + buf = &tx_ring->buf[index];
792 + ipqess_tx_unmap_and_free(&eth->pdev->dev, buf);
793 + memset(desc, 0, sizeof(*desc));
794 + if (++index == tx_ring->count)
795 + index = 0;
796 + }
797 + tx_ring->head = start_index;
798 +}
799 +
800 +static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring,
801 + struct sk_buff *skb)
802 +{
803 + struct ipqess_tx_desc *desc = NULL, *first_desc = NULL;
804 + u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
805 + struct platform_device *pdev = tx_ring->ess->pdev;
806 + struct ipqess_buf *buf = NULL;
807 + u16 len;
808 + int i;
809 +
810 + if (skb_is_gso(skb)) {
811 + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
812 + lso_word1 |= IPQESS_TPD_IPV4_EN;
813 + ip_hdr(skb)->check = 0;
814 + tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
815 + ip_hdr(skb)->daddr,
816 + 0, IPPROTO_TCP, 0);
817 + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
818 + lso_word1 |= IPQESS_TPD_LSO_V2_EN;
819 + ipv6_hdr(skb)->payload_len = 0;
820 + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
821 + &ipv6_hdr(skb)->daddr,
822 + 0, IPPROTO_TCP, 0);
823 + }
824 +
825 + lso_word1 |= IPQESS_TPD_LSO_EN |
826 + ((skb_shinfo(skb)->gso_size & IPQESS_TPD_MSS_MASK) <<
827 + IPQESS_TPD_MSS_SHIFT) |
828 + (skb_transport_offset(skb) << IPQESS_TPD_HDR_SHIFT);
829 + } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
830 + u8 css, cso;
831 +
832 + cso = skb_checksum_start_offset(skb);
833 + css = cso + skb->csum_offset;
834 +
835 + word1 |= (IPQESS_TPD_CUSTOM_CSUM_EN);
836 + word1 |= (cso >> 1) << IPQESS_TPD_HDR_SHIFT;
837 + word1 |= ((css >> 1) << IPQESS_TPD_CUSTOM_CSUM_SHIFT);
838 + }
839 +
840 + if (skb_vlan_tag_present(skb)) {
841 + switch (skb->vlan_proto) {
842 + case htons(ETH_P_8021Q):
843 + word3 |= BIT(IPQESS_TX_INS_CVLAN);
844 + word3 |= skb_vlan_tag_get(skb) << IPQESS_TX_CVLAN_TAG_SHIFT;
845 + break;
846 + case htons(ETH_P_8021AD):
847 + word1 |= BIT(IPQESS_TX_INS_SVLAN);
848 + svlan_tag = skb_vlan_tag_get(skb);
849 + break;
850 + default:
851 + dev_err(&pdev->dev, "no ctag or stag present\n");
852 + goto vlan_tag_error;
853 + }
854 + }
855 +
856 + if (eth_type_vlan(skb->protocol))
857 + word1 |= IPQESS_TPD_VLAN_TAGGED;
858 +
859 + if (skb->protocol == htons(ETH_P_PPP_SES))
860 + word1 |= IPQESS_TPD_PPPOE_EN;
861 +
862 + len = skb_headlen(skb);
863 +
864 + first_desc = ipqess_tx_desc_next(tx_ring);
865 + desc = first_desc;
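+ /* For LSOv2, the first TPD carries the total packet length in
+ * its addr field instead of a buffer address.
+ */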
866 + if (lso_word1 & IPQESS_TPD_LSO_V2_EN) {
867 + desc->addr = cpu_to_le32(skb->len);
868 + desc->word1 = cpu_to_le32(word1 | lso_word1);
869 + desc->svlan_tag = cpu_to_le16(svlan_tag);
870 + desc->word3 = cpu_to_le32(word3);
871 + desc = ipqess_tx_desc_next(tx_ring);
872 + }
873 +
874 + buf = ipqess_get_tx_buffer(tx_ring, desc);
875 + buf->length = len;
876 + buf->dma = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
877 +
878 + if (dma_mapping_error(&pdev->dev, buf->dma))
879 + goto dma_error;
880 +
881 + desc->addr = cpu_to_le32(buf->dma);
882 + desc->len = cpu_to_le16(len);
883 +
884 + buf->flags |= IPQESS_DESC_SINGLE;
885 + desc->word1 = cpu_to_le32(word1 | lso_word1);
886 + desc->svlan_tag = cpu_to_le16(svlan_tag);
887 + desc->word3 = cpu_to_le32(word3);
888 +
889 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
890 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
891 +
892 + len = skb_frag_size(frag);
893 + desc = ipqess_tx_desc_next(tx_ring);
894 + buf = ipqess_get_tx_buffer(tx_ring, desc);
895 + buf->length = len;
896 + buf->flags |= IPQESS_DESC_PAGE;
897 + buf->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
898 + DMA_TO_DEVICE);
899 +
900 + if (dma_mapping_error(&pdev->dev, buf->dma))
901 + goto dma_error;
902 +
903 + desc->addr = cpu_to_le32(buf->dma);
904 + desc->len = cpu_to_le16(len);
905 + desc->svlan_tag = cpu_to_le16(svlan_tag);
906 + desc->word1 = cpu_to_le32(word1 | lso_word1);
907 + desc->word3 = cpu_to_le32(word3);
908 + }
909 + desc->word1 |= cpu_to_le32(1 << IPQESS_TPD_EOP_SHIFT);
910 + buf->skb = skb;
911 + buf->flags |= IPQESS_DESC_LAST;
912 +
913 + return 0;
914 +
915 +dma_error:
916 + ipqess_rollback_tx(tx_ring->ess, first_desc, tx_ring->ring_id);
917 + dev_err(&pdev->dev, "TX DMA map failed\n");
918 +
919 +vlan_tag_error:
920 + return -ENOMEM;
921 +}
922 +
923 +static void ipqess_kick_tx(struct ipqess_tx_ring *tx_ring)
924 +{
925 + /* Ensure that all TPDs have been written completely */
926 + dma_wmb();
927 +
928 + /* update software producer index */
929 + ipqess_w32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx),
930 + tx_ring->head);
931 +}
932 +
933 +static netdev_tx_t ipqess_xmit(struct sk_buff *skb, struct net_device *netdev)
934 +{
935 + struct ipqess *ess = netdev_priv(netdev);
936 + struct ipqess_tx_ring *tx_ring;
937 + int avail;
938 + int tx_num;
939 + int ret;
940 +
941 + tx_ring = &ess->tx_ring[skb_get_queue_mapping(skb)];
942 + tx_num = ipqess_cal_txd_req(skb);
943 + avail = ipqess_tx_desc_available(tx_ring);
944 + if (avail < tx_num) {
945 + netdev_dbg(netdev,
946 + "stopping tx queue %d, avail=%d req=%d im=%x\n",
947 + tx_ring->idx, avail, tx_num,
948 + ipqess_r32(tx_ring->ess,
949 + IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx)));
950 + netif_tx_stop_queue(tx_ring->nq);
951 + ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
952 + ipqess_kick_tx(tx_ring);
953 + return NETDEV_TX_BUSY;
954 + }
955 +
956 + ret = ipqess_tx_map_and_fill(tx_ring, skb);
957 + if (ret) {
958 + dev_kfree_skb_any(skb);
959 + ess->stats.tx_errors++;
960 + goto err_out;
961 + }
962 +
963 + ess->stats.tx_packets++;
964 + ess->stats.tx_bytes += skb->len;
965 + netdev_tx_sent_queue(tx_ring->nq, skb->len);
966 +
967 + if (!netdev_xmit_more() || netif_xmit_stopped(tx_ring->nq))
968 + ipqess_kick_tx(tx_ring);
969 +
970 +err_out:
971 + return NETDEV_TX_OK;
972 +}
973 +
974 +static int ipqess_set_mac_address(struct net_device *netdev, void *p)
975 +{
976 + struct ipqess *ess = netdev_priv(netdev);
977 + const char *macaddr = netdev->dev_addr;
978 + int ret = eth_mac_addr(netdev, p);
979 +
980 + if (ret)
981 + return ret;
982 +
983 + ipqess_w32(ess, IPQESS_REG_MAC_CTRL1, (macaddr[0] << 8) | macaddr[1]);
984 + ipqess_w32(ess, IPQESS_REG_MAC_CTRL0,
985 + (macaddr[2] << 24) | (macaddr[3] << 16) | (macaddr[4] << 8) |
986 + macaddr[5]);
987 +
988 + return 0;
989 +}
990 +
991 +static void ipqess_tx_timeout(struct net_device *netdev, unsigned int txq_id)
992 +{
993 + struct ipqess *ess = netdev_priv(netdev);
994 + struct ipqess_tx_ring *tr = &ess->tx_ring[txq_id];
995 +
996 + netdev_warn(netdev, "TX timeout on queue %d\n", tr->idx);
997 +}
998 +
999 +static const struct net_device_ops ipqess_axi_netdev_ops = {
1000 + .ndo_init = ipqess_init,
1001 + .ndo_uninit = ipqess_uninit,
1002 + .ndo_open = ipqess_open,
1003 + .ndo_stop = ipqess_stop,
1004 + .ndo_eth_ioctl = ipqess_do_ioctl,
1005 + .ndo_start_xmit = ipqess_xmit,
1006 + .ndo_get_stats = ipqess_get_stats,
1007 + .ndo_set_mac_address = ipqess_set_mac_address,
1008 + .ndo_tx_timeout = ipqess_tx_timeout,
1009 +};
1010 +
1011 +static void ipqess_hw_stop(struct ipqess *ess)
1012 +{
1013 + int i;
1014 +
1015 + /* disable all RX queue IRQs */
1016 + for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++)
1017 + ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0);
1018 +
1019 + /* disable all TX queue IRQs */
1020 + for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++)
1021 + ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0);
1022 +
1023 + /* disable all other IRQs */
1024 + ipqess_w32(ess, IPQESS_REG_MISC_IMR, 0);
1025 + ipqess_w32(ess, IPQESS_REG_WOL_IMR, 0);
1026 +
1027 + /* clear the IRQ status registers */
1028 + ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
1029 + ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
1030 + ipqess_w32(ess, IPQESS_REG_MISC_ISR, 0x1fff);
1031 + ipqess_w32(ess, IPQESS_REG_WOL_ISR, 0x1);
1032 + ipqess_w32(ess, IPQESS_REG_WOL_CTRL, 0);
1033 +
1034 + /* disable RX and TX queues */
1035 + ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, 0, IPQESS_REG_RXQ_CTRL);
1036 + ipqess_m32(ess, IPQESS_TXQ_CTRL_TXQ_EN, 0, IPQESS_REG_TXQ_CTRL);
1037 +}
1038 +
1039 +static int ipqess_hw_init(struct ipqess *ess)
1040 +{
1041 + int i, err;
1042 + u32 tmp;
1043 +
1044 + ipqess_hw_stop(ess);
1045 +
1046 + ipqess_m32(ess, BIT(IPQESS_INTR_SW_IDX_W_TYP_SHIFT),
1047 + IPQESS_INTR_SW_IDX_W_TYPE << IPQESS_INTR_SW_IDX_W_TYP_SHIFT,
1048 + IPQESS_REG_INTR_CTRL);
1049 +
1050 + /* enable IRQ delay slot */
1051 + ipqess_w32(ess, IPQESS_REG_IRQ_MODRT_TIMER_INIT,
1052 + (IPQESS_TX_IMT << IPQESS_IRQ_MODRT_TX_TIMER_SHIFT) |
1053 + (IPQESS_RX_IMT << IPQESS_IRQ_MODRT_RX_TIMER_SHIFT));
1054 +
1055 + /* Set Customer and Service VLAN TPIDs */
1056 + ipqess_w32(ess, IPQESS_REG_VLAN_CFG,
1057 + (ETH_P_8021Q << IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT) |
1058 + (ETH_P_8021AD << IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT));
1059 +
1060 + /* Configure the TX Queue bursting */
1061 + ipqess_w32(ess, IPQESS_REG_TXQ_CTRL,
1062 + (IPQESS_TPD_BURST << IPQESS_TXQ_NUM_TPD_BURST_SHIFT) |
1063 + (IPQESS_TXF_BURST << IPQESS_TXQ_TXF_BURST_NUM_SHIFT) |
1064 + IPQESS_TXQ_CTRL_TPD_BURST_EN);
1065 +
1066 + /* Set RSS type */
1067 + ipqess_w32(ess, IPQESS_REG_RSS_TYPE,
1068 + IPQESS_RSS_TYPE_IPV4TCP | IPQESS_RSS_TYPE_IPV6_TCP |
1069 + IPQESS_RSS_TYPE_IPV4_UDP | IPQESS_RSS_TYPE_IPV6UDP |
1070 + IPQESS_RSS_TYPE_IPV4 | IPQESS_RSS_TYPE_IPV6);
1071 +
1072 + /* Set RFD ring burst and threshold */
1073 + ipqess_w32(ess, IPQESS_REG_RX_DESC1,
1074 + (IPQESS_RFD_BURST << IPQESS_RXQ_RFD_BURST_NUM_SHIFT) |
1075 + (IPQESS_RFD_THR << IPQESS_RXQ_RFD_PF_THRESH_SHIFT) |
1076 + (IPQESS_RFD_LTHR << IPQESS_RXQ_RFD_LOW_THRESH_SHIFT));
1077 +
1078 + /* Set the Rx FIFO
1079 + * - threshold at which to start DMAing data to the host
1080 + */
1081 + ipqess_w32(ess, IPQESS_REG_RXQ_CTRL,
1082 + IPQESS_FIFO_THRESH_128_BYTE | IPQESS_RXQ_CTRL_RMV_VLAN);
1083 +
1084 + err = ipqess_rx_ring_alloc(ess);
1085 + if (err)
1086 + return err;
1087 +
1088 + err = ipqess_tx_ring_alloc(ess);
1089 + if (err)
1090 + goto err_rx_ring_free;
1091 +
1092 + /* Load all of ring base addresses above into the dma engine */
1093 + ipqess_m32(ess, 0, BIT(IPQESS_LOAD_PTR_SHIFT), IPQESS_REG_TX_SRAM_PART);
1094 +
1095 + /* Disable TX FIFO low watermark and high watermark */
1096 + ipqess_w32(ess, IPQESS_REG_TXF_WATER_MARK, 0);
1097 +
1098 + /* Configure the RSS indirection table.
1099 + * 128 hash values will be configured in the following
1100 + * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
1101 + * and so on
1102 + */
1103 + for (i = 0; i < IPQESS_NUM_IDT; i++)
1104 + ipqess_w32(ess, IPQESS_REG_RSS_IDT(i), IPQESS_RSS_IDT_VALUE);
1105 +
1106 + /* Configure the load balance mapping table.
1107 + * 4 table entries will be configured according to the
1108 + * following pattern: load_balance{0,1,2,3} = {Q0,Q2,Q4,Q6}
1109 + * respectively.
1110 + */
1111 + ipqess_w32(ess, IPQESS_REG_LB_RING, IPQESS_LB_REG_VALUE);
1112 +
1113 + /* Configure Virtual queue for Tx rings */
1114 + ipqess_w32(ess, IPQESS_REG_VQ_CTRL0, IPQESS_VQ_REG_VALUE);
1115 + ipqess_w32(ess, IPQESS_REG_VQ_CTRL1, IPQESS_VQ_REG_VALUE);
1116 +
1117 + /* Configure the max AXI burst write size to 128 bytes */
1118 + ipqess_w32(ess, IPQESS_REG_AXIW_CTRL_MAXWRSIZE,
1119 + IPQESS_AXIW_MAXWRSIZE_VALUE);
1120 +
1121 + /* Enable TX queues */
1122 + ipqess_m32(ess, 0, IPQESS_TXQ_CTRL_TXQ_EN, IPQESS_REG_TXQ_CTRL);
1123 +
1124 + /* Enable RX queues */
1125 + tmp = 0;
1126 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++)
1127 + tmp |= IPQESS_RXQ_CTRL_EN(ess->rx_ring[i].idx);
1128 +
1129 + ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, tmp, IPQESS_REG_RXQ_CTRL);
1130 +
1131 + return 0;
1132 +
1133 +err_rx_ring_free:
1134 +
1135 + ipqess_rx_ring_free(ess);
1136 + return err;
1137 +}
1138 +
1139 +static void ipqess_mac_config(struct phylink_config *config, unsigned int mode,
1140 + const struct phylink_link_state *state)
1141 +{
1142 + /* Nothing to do, use fixed Internal mode */
1143 +}
1144 +
1145 +static void ipqess_mac_link_down(struct phylink_config *config,
1146 + unsigned int mode,
1147 + phy_interface_t interface)
1148 +{
1149 + /* Nothing to do, use fixed Internal mode */
1150 +}
1151 +
1152 +static void ipqess_mac_link_up(struct phylink_config *config,
1153 + struct phy_device *phy, unsigned int mode,
1154 + phy_interface_t interface,
1155 + int speed, int duplex,
1156 + bool tx_pause, bool rx_pause)
1157 +{
1158 + /* Nothing to do, use fixed Internal mode */
1159 +}
1160 +
1161 +static const struct phylink_mac_ops ipqess_phylink_mac_ops = {
1162 + .validate = phylink_generic_validate,
1163 + .mac_config = ipqess_mac_config,
1164 + .mac_link_up = ipqess_mac_link_up,
1165 + .mac_link_down = ipqess_mac_link_down,
1166 +};
1167 +
1168 +static void ipqess_reset(struct ipqess *ess)
1169 +{
1170 + reset_control_assert(ess->ess_rst);
1171 +
1172 + mdelay(10);
1173 +
1174 + reset_control_deassert(ess->ess_rst);
1175 +
1176 + /* Waiting for all inner tables to be flushed and reinitialized.
1177 + * This takes between 5 and 10 ms
1178 + */
1179 +
1180 + mdelay(10);
1181 +}
1182 +
1183 +static int ipqess_axi_probe(struct platform_device *pdev)
1184 +{
1185 + struct device_node *np = pdev->dev.of_node;
1186 + struct net_device *netdev;
1187 + phy_interface_t phy_mode;
1188 + struct ipqess *ess;
1189 + int i, err = 0;
1190 +
1191 + netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(*ess),
1192 + IPQESS_NETDEV_QUEUES,
1193 + IPQESS_NETDEV_QUEUES);
1194 + if (!netdev)
1195 + return -ENOMEM;
1196 +
1197 + ess = netdev_priv(netdev);
1198 + ess->netdev = netdev;
1199 + ess->pdev = pdev;
1200 + spin_lock_init(&ess->stats_lock);
1201 + SET_NETDEV_DEV(netdev, &pdev->dev);
1202 + platform_set_drvdata(pdev, netdev);
1203 +
1204 + ess->hw_addr = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
1205 + if (IS_ERR(ess->hw_addr))
1206 + return PTR_ERR(ess->hw_addr);
1207 +
1208 + err = of_get_phy_mode(np, &phy_mode);
1209 + if (err) {
1210 + dev_err(&pdev->dev, "incorrect phy-mode\n");
1211 + return err;
1212 + }
1213 +
1214 + ess->ess_clk = devm_clk_get(&pdev->dev, NULL);
1215 + if (!IS_ERR(ess->ess_clk))
1216 + clk_prepare_enable(ess->ess_clk);
1217 +
1218 + ess->ess_rst = devm_reset_control_get(&pdev->dev, NULL);
1219 + if (IS_ERR(ess->ess_rst)) {
1220 + err = PTR_ERR(ess->ess_rst);
1221 + goto err_clk;
1222 + }
1221 +
1222 + ipqess_reset(ess);
1223 +
1224 + ess->phylink_config.dev = &netdev->dev;
1225 + ess->phylink_config.type = PHYLINK_NETDEV;
1226 + ess->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
1227 + MAC_100 | MAC_1000FD;
1228 +
1229 + __set_bit(PHY_INTERFACE_MODE_INTERNAL,
1230 + ess->phylink_config.supported_interfaces);
1231 +
1232 + ess->phylink = phylink_create(&ess->phylink_config,
1233 + of_fwnode_handle(np), phy_mode,
1234 + &ipqess_phylink_mac_ops);
1235 + if (IS_ERR(ess->phylink)) {
1236 + err = PTR_ERR(ess->phylink);
1237 + goto err_clk;
1238 + }
1239 +
1240 + for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
1241 + ess->tx_irq[i] = platform_get_irq(pdev, i);
1242 + scnprintf(ess->tx_irq_names[i], sizeof(ess->tx_irq_names[i]),
1243 + "%s:txq%d", pdev->name, i);
1244 + }
1245 +
1246 + for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
1247 + ess->rx_irq[i] = platform_get_irq(pdev, i + IPQESS_MAX_TX_QUEUE);
1248 + scnprintf(ess->rx_irq_names[i], sizeof(ess->rx_irq_names[i]),
1249 + "%s:rxq%d", pdev->name, i);
1250 + }
1251 +
1252 + netdev->netdev_ops = &ipqess_axi_netdev_ops;
1253 + netdev->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
1254 + NETIF_F_HW_VLAN_CTAG_RX |
1255 + NETIF_F_HW_VLAN_CTAG_TX |
1256 + NETIF_F_TSO | NETIF_F_GRO | NETIF_F_SG;
1257 + /* feature change is not supported yet */
1258 + netdev->hw_features = 0;
1259 + netdev->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |
1260 + NETIF_F_TSO |
1261 + NETIF_F_GRO;
1262 + netdev->watchdog_timeo = 5 * HZ;
1263 + netdev->base_addr = (u32)ess->hw_addr;
1264 + netdev->max_mtu = 9000;
1265 + netdev->gso_max_segs = IPQESS_TX_RING_SIZE / 2;
1266 +
1267 + ipqess_set_ethtool_ops(netdev);
1268 +
1269 + err = ipqess_hw_init(ess);
1270 + if (err)
1271 + goto err_phylink;
1272 +
1273 + for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
1274 + netif_napi_add_tx(netdev, &ess->tx_ring[i].napi_tx, ipqess_tx_napi);
1275 + netif_napi_add(netdev, &ess->rx_ring[i].napi_rx, ipqess_rx_napi);
1276 + }
1277 +
1278 + err = register_netdev(netdev);
1279 + if (err)
1280 + goto err_hw_stop;
1281 +
1282 + return 0;
1283 +
1284 +err_hw_stop:
1285 + ipqess_hw_stop(ess);
1286 +
1287 + ipqess_tx_ring_free(ess);
1288 + ipqess_rx_ring_free(ess);
1289 +err_phylink:
1290 + phylink_destroy(ess->phylink);
1291 +
1292 +err_clk:
1293 + clk_disable_unprepare(ess->ess_clk);
1294 +
1295 + return err;
1296 +}
1297 +
1298 +static int ipqess_axi_remove(struct platform_device *pdev)
1299 +{
1300 + const struct net_device *netdev = platform_get_drvdata(pdev);
1301 + struct ipqess *ess = netdev_priv(netdev);
1302 +
1303 + unregister_netdev(ess->netdev);
1304 + ipqess_hw_stop(ess);
1305 +
1306 + ipqess_tx_ring_free(ess);
1307 + ipqess_rx_ring_free(ess);
1308 +
1309 + phylink_destroy(ess->phylink);
1310 + clk_disable_unprepare(ess->ess_clk);
1311 +
1312 + return 0;
1313 +}
1314 +
1315 +static const struct of_device_id ipqess_of_mtable[] = {
1316 + {.compatible = "qcom,ipq4019-ess-edma" },
1317 + {}
1318 +};
1319 +MODULE_DEVICE_TABLE(of, ipqess_of_mtable);
1320 +
1321 +static struct platform_driver ipqess_axi_driver = {
1322 + .driver = {
1323 + .name = "ipqess-edma",
1324 + .of_match_table = ipqess_of_mtable,
1325 + },
1326 + .probe = ipqess_axi_probe,
1327 + .remove = ipqess_axi_remove,
1328 +};
1329 +
1330 +module_platform_driver(ipqess_axi_driver);
1331 +
1332 +MODULE_AUTHOR("Qualcomm Atheros Inc");
1333 +MODULE_AUTHOR("John Crispin <john@phrozen.org>");
1334 +MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
1335 +MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>");
1336 +MODULE_AUTHOR("Maxime Chevallier <maxime.chevallier@bootlin.com>");
1337 +MODULE_LICENSE("GPL");
1338 --- /dev/null
1339 +++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.h
1340 @@ -0,0 +1,518 @@
1341 +/* SPDX-License-Identifier: (GPL-2.0 OR ISC) */
1342 +/* Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
1343 + * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
1344 + * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
1345 + * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
1346 + * Copyright (c) 2021 - 2022, Maxime Chevallier <maxime.chevallier@bootlin.com>
1347 + *
1348 + */
1349 +
1350 +#ifndef _IPQESS_H_
1351 +#define _IPQESS_H_
1352 +
1353 +#define IPQESS_NETDEV_QUEUES 4
1354 +
1355 +#define IPQESS_TPD_EOP_SHIFT 31
1356 +
1357 +#define IPQESS_PORT_ID_SHIFT 12
1358 +#define IPQESS_PORT_ID_MASK 0x7
1359 +
1360 +/* tpd word 3 bit 18-28 */
1361 +#define IPQESS_TPD_PORT_BITMAP_SHIFT 18
1362 +
1363 +#define IPQESS_TPD_FROM_CPU_SHIFT 25
1364 +
1365 +#define IPQESS_RX_RING_SIZE 128
1366 +#define IPQESS_RX_HEAD_BUFF_SIZE 1540
1367 +#define IPQESS_TX_RING_SIZE 128
1368 +#define IPQESS_MAX_RX_QUEUE 8
1369 +#define IPQESS_MAX_TX_QUEUE 16
1370 +
1371 +/* Configurations */
1372 +#define IPQESS_INTR_CLEAR_TYPE 0
1373 +#define IPQESS_INTR_SW_IDX_W_TYPE 0
1374 +#define IPQESS_FIFO_THRESH_TYPE 0
1375 +#define IPQESS_RSS_TYPE 0
1376 +#define IPQESS_RX_IMT 0x0020
1377 +#define IPQESS_TX_IMT 0x0050
1378 +#define IPQESS_TPD_BURST 5
1379 +#define IPQESS_TXF_BURST 0x100
1380 +#define IPQESS_RFD_BURST 8
1381 +#define IPQESS_RFD_THR 16
1382 +#define IPQESS_RFD_LTHR 0
1383 +
1384 +/* Flags used in transmit direction */
1385 +#define IPQESS_DESC_LAST 0x1
1386 +#define IPQESS_DESC_SINGLE 0x2
1387 +#define IPQESS_DESC_PAGE 0x4
1388 +
1389 +struct ipqess_statistics {
1390 + u32 tx_q0_pkt;
1391 + u32 tx_q1_pkt;
1392 + u32 tx_q2_pkt;
1393 + u32 tx_q3_pkt;
1394 + u32 tx_q4_pkt;
1395 + u32 tx_q5_pkt;
1396 + u32 tx_q6_pkt;
1397 + u32 tx_q7_pkt;
1398 + u32 tx_q8_pkt;
1399 + u32 tx_q9_pkt;
1400 + u32 tx_q10_pkt;
1401 + u32 tx_q11_pkt;
1402 + u32 tx_q12_pkt;
1403 + u32 tx_q13_pkt;
1404 + u32 tx_q14_pkt;
1405 + u32 tx_q15_pkt;
1406 + u32 tx_q0_byte;
1407 + u32 tx_q1_byte;
1408 + u32 tx_q2_byte;
1409 + u32 tx_q3_byte;
1410 + u32 tx_q4_byte;
1411 + u32 tx_q5_byte;
1412 + u32 tx_q6_byte;
1413 + u32 tx_q7_byte;
1414 + u32 tx_q8_byte;
1415 + u32 tx_q9_byte;
1416 + u32 tx_q10_byte;
1417 + u32 tx_q11_byte;
1418 + u32 tx_q12_byte;
1419 + u32 tx_q13_byte;
1420 + u32 tx_q14_byte;
1421 + u32 tx_q15_byte;
1422 + u32 rx_q0_pkt;
1423 + u32 rx_q1_pkt;
1424 + u32 rx_q2_pkt;
1425 + u32 rx_q3_pkt;
1426 + u32 rx_q4_pkt;
1427 + u32 rx_q5_pkt;
1428 + u32 rx_q6_pkt;
1429 + u32 rx_q7_pkt;
1430 + u32 rx_q0_byte;
1431 + u32 rx_q1_byte;
1432 + u32 rx_q2_byte;
1433 + u32 rx_q3_byte;
1434 + u32 rx_q4_byte;
1435 + u32 rx_q5_byte;
1436 + u32 rx_q6_byte;
1437 + u32 rx_q7_byte;
1438 + u32 tx_desc_error;
1439 +};
1440 +
1441 +struct ipqess_tx_desc {
1442 + __le16 len;
1443 + __le16 svlan_tag;
1444 + __le32 word1;
1445 + __le32 addr;
1446 + __le32 word3;
1447 +} __aligned(16) __packed;
1448 +
1449 +struct ipqess_rx_desc {
1450 + __le16 rrd0;
1451 + __le16 rrd1;
1452 + __le16 rrd2;
1453 + __le16 rrd3;
1454 + __le16 rrd4;
1455 + __le16 rrd5;
1456 + __le16 rrd6;
1457 + __le16 rrd7;
1458 +} __aligned(16) __packed;
1459 +
1460 +struct ipqess_buf {
1461 + struct sk_buff *skb;
1462 + dma_addr_t dma;
1463 + u32 flags;
1464 + u16 length;
1465 +};
1466 +
1467 +struct ipqess_tx_ring {
1468 + struct napi_struct napi_tx;
1469 + u32 idx;
1470 + int ring_id;
1471 + struct ipqess *ess;
1472 + struct netdev_queue *nq;
1473 + struct ipqess_tx_desc *hw_desc;
1474 + struct ipqess_buf *buf;
1475 + dma_addr_t dma;
1476 + u16 count;
1477 + u16 head;
1478 + u16 tail;
1479 +};
1480 +
1481 +struct ipqess_rx_ring {
1482 + struct napi_struct napi_rx;
1483 + u32 idx;
1484 + int ring_id;
1485 + struct ipqess *ess;
1486 + struct device *ppdev;
1487 + struct ipqess_rx_desc **hw_desc;
1488 + struct ipqess_buf *buf;
1489 + dma_addr_t dma;
1490 + u16 head;
1491 + u16 tail;
1492 + atomic_t refill_count;
1493 +};
1494 +
1495 +struct ipqess_rx_ring_refill {
1496 + struct ipqess_rx_ring *rx_ring;
1497 + struct work_struct refill_work;
1498 +};
1499 +
1500 +#define IPQESS_IRQ_NAME_LEN 32
1501 +
1502 +struct ipqess {
1503 + struct net_device *netdev;
1504 + void __iomem *hw_addr;
1505 +
1506 + struct clk *ess_clk;
1507 + struct reset_control *ess_rst;
1508 +
1509 + struct ipqess_rx_ring rx_ring[IPQESS_NETDEV_QUEUES];
1510 +
1511 + struct platform_device *pdev;
1512 + struct phylink *phylink;
1513 + struct phylink_config phylink_config;
1514 + struct ipqess_tx_ring tx_ring[IPQESS_NETDEV_QUEUES];
1515 +
1516 + struct ipqess_statistics ipqess_stats;
1517 +
1518 + /* Protects stats */
1519 + spinlock_t stats_lock;
1520 + struct net_device_stats stats;
1521 +
1522 + struct ipqess_rx_ring_refill rx_refill[IPQESS_NETDEV_QUEUES];
1523 + u32 tx_irq[IPQESS_MAX_TX_QUEUE];
1524 + char tx_irq_names[IPQESS_MAX_TX_QUEUE][IPQESS_IRQ_NAME_LEN];
1525 + u32 rx_irq[IPQESS_MAX_RX_QUEUE];
1526 + char rx_irq_names[IPQESS_MAX_RX_QUEUE][IPQESS_IRQ_NAME_LEN];
1527 +};
1528 +
1529 +void ipqess_set_ethtool_ops(struct net_device *netdev);
1530 +void ipqess_update_hw_stats(struct ipqess *ess);
1531 +
1532 +/* register definition */
1533 +#define IPQESS_REG_MAS_CTRL 0x0
1534 +#define IPQESS_REG_TIMEOUT_CTRL 0x004
1535 +#define IPQESS_REG_DBG0 0x008
1536 +#define IPQESS_REG_DBG1 0x00C
1537 +#define IPQESS_REG_SW_CTRL0 0x100
1538 +#define IPQESS_REG_SW_CTRL1 0x104
1539 +
1540 +/* Interrupt Status Register */
1541 +#define IPQESS_REG_RX_ISR 0x200
1542 +#define IPQESS_REG_TX_ISR 0x208
1543 +#define IPQESS_REG_MISC_ISR 0x210
1544 +#define IPQESS_REG_WOL_ISR 0x218
1545 +
1546 +#define IPQESS_MISC_ISR_RX_URG_Q(x) (1 << (x))
1547 +
1548 +#define IPQESS_MISC_ISR_AXIR_TIMEOUT 0x00000100
1549 +#define IPQESS_MISC_ISR_AXIR_ERR 0x00000200
1550 +#define IPQESS_MISC_ISR_TXF_DEAD 0x00000400
1551 +#define IPQESS_MISC_ISR_AXIW_ERR 0x00000800
1552 +#define IPQESS_MISC_ISR_AXIW_TIMEOUT 0x00001000
1553 +
1554 +#define IPQESS_WOL_ISR 0x00000001
1555 +
1556 +/* Interrupt Mask Register */
1557 +#define IPQESS_REG_MISC_IMR 0x214
1558 +#define IPQESS_REG_WOL_IMR 0x218
1559 +
1560 +#define IPQESS_RX_IMR_NORMAL_MASK 0x1
1561 +#define IPQESS_TX_IMR_NORMAL_MASK 0x1
1562 +#define IPQESS_MISC_IMR_NORMAL_MASK 0x80001FFF
1563 +#define IPQESS_WOL_IMR_NORMAL_MASK 0x1
1564 +
1565 +/* Edma receive consumer index */
1566 +#define IPQESS_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
1567 +
1568 +/* Edma transmit consumer index */
1569 +#define IPQESS_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
1570 +
1571 +/* IRQ Moderator Initial Timer Register */
1572 +#define IPQESS_REG_IRQ_MODRT_TIMER_INIT 0x280
1573 +#define IPQESS_IRQ_MODRT_TIMER_MASK 0xFFFF
1574 +#define IPQESS_IRQ_MODRT_RX_TIMER_SHIFT 0
1575 +#define IPQESS_IRQ_MODRT_TX_TIMER_SHIFT 16
1576 +
1577 +/* Interrupt Control Register */
1578 +#define IPQESS_REG_INTR_CTRL 0x284
1579 +#define IPQESS_INTR_CLR_TYP_SHIFT 0
1580 +#define IPQESS_INTR_SW_IDX_W_TYP_SHIFT 1
1581 +#define IPQESS_INTR_CLEAR_TYPE_W1 0
1582 +#define IPQESS_INTR_CLEAR_TYPE_R 1
1583 +
1584 +/* RX Interrupt Mask Register */
1585 +#define IPQESS_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
1586 +
1587 +/* TX Interrupt mask register */
1588 +#define IPQESS_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
1589 +
1590 +/* Load Ptr Register
1591 + * Software sets this bit after initializing the head and tail pointers
1592 + */
1593 +#define IPQESS_REG_TX_SRAM_PART 0x400
1594 +#define IPQESS_LOAD_PTR_SHIFT 16
1595 +
1596 +/* TXQ Control Register */
1597 +#define IPQESS_REG_TXQ_CTRL 0x404
1598 +#define IPQESS_TXQ_CTRL_IP_OPTION_EN 0x10
1599 +#define IPQESS_TXQ_CTRL_TXQ_EN 0x20
1600 +#define IPQESS_TXQ_CTRL_ENH_MODE 0x40
1601 +#define IPQESS_TXQ_CTRL_LS_8023_EN 0x80
1602 +#define IPQESS_TXQ_CTRL_TPD_BURST_EN 0x100
1603 +#define IPQESS_TXQ_CTRL_LSO_BREAK_EN 0x200
1604 +#define IPQESS_TXQ_NUM_TPD_BURST_MASK 0xF
1605 +#define IPQESS_TXQ_TXF_BURST_NUM_MASK 0xFFFF
1606 +#define IPQESS_TXQ_NUM_TPD_BURST_SHIFT 0
1607 +#define IPQESS_TXQ_TXF_BURST_NUM_SHIFT 16
1608 +
1609 +#define IPQESS_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */
1610 +#define IPQESS_TXF_WATER_MARK_MASK 0x0FFF
1611 +#define IPQESS_TXF_LOW_WATER_MARK_SHIFT 0
1612 +#define IPQESS_TXF_HIGH_WATER_MARK_SHIFT 16
1613 +#define IPQESS_TXQ_CTRL_BURST_MODE_EN 0x80000000
1614 +
1615 +/* WRR Control Register */
1616 +#define IPQESS_REG_WRR_CTRL_Q0_Q3 0x40c
1617 +#define IPQESS_REG_WRR_CTRL_Q4_Q7 0x410
1618 +#define IPQESS_REG_WRR_CTRL_Q8_Q11 0x414
1619 +#define IPQESS_REG_WRR_CTRL_Q12_Q15 0x418
1620 +
1621 +/* Weighted round robin (WRR): takes a queue id as input and computes
1622 + * the starting bit position at which the weight for that queue must
1623 + * be written
1624 + */
1625 +#define IPQESS_WRR_SHIFT(x) (((x) * 5) % 20)
1626 +
1627 +/* Tx Descriptor Control Register */
1628 +#define IPQESS_REG_TPD_RING_SIZE 0x41C
1629 +#define IPQESS_TPD_RING_SIZE_SHIFT 0
1630 +#define IPQESS_TPD_RING_SIZE_MASK 0xFFFF
1631 +
1632 +/* Transmit descriptor base address */
1633 +#define IPQESS_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
1634 +
1635 +/* TPD Index Register */
1636 +#define IPQESS_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
1637 +
1638 +#define IPQESS_TPD_PROD_IDX_BITS 0x0000FFFF
1639 +#define IPQESS_TPD_CONS_IDX_BITS 0xFFFF0000
1640 +#define IPQESS_TPD_PROD_IDX_MASK 0xFFFF
1641 +#define IPQESS_TPD_CONS_IDX_MASK 0xFFFF
1642 +#define IPQESS_TPD_PROD_IDX_SHIFT 0
1643 +#define IPQESS_TPD_CONS_IDX_SHIFT 16
1644 +
1645 +/* TX Virtual Queue Mapping Control Register */
1646 +#define IPQESS_REG_VQ_CTRL0 0x4A0
1647 +#define IPQESS_REG_VQ_CTRL1 0x4A4
1648 +
1649 +/* Virtual QID shift: takes a queue id as input and computes the
1650 + * virtual QID position in the virtual QID control register
1651 + */
1652 +#define IPQESS_VQ_ID_SHIFT(i) (((i) * 3) % 24)
1653 +
1654 +/* Virtual Queue Default Value */
1655 +#define IPQESS_VQ_REG_VALUE 0x240240
1656 +
1657 +/* Tx side Port Interface Control Register */
1658 +#define IPQESS_REG_PORT_CTRL 0x4A8
1659 +#define IPQESS_PAD_EN_SHIFT 15
1660 +
1661 +/* Tx side VLAN Configuration Register */
1662 +#define IPQESS_REG_VLAN_CFG 0x4AC
1663 +
1664 +#define IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT 0
1665 +#define IPQESS_VLAN_CFG_SVLAN_TPID_MASK 0xffff
1666 +#define IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT 16
1667 +#define IPQESS_VLAN_CFG_CVLAN_TPID_MASK 0xffff
1668 +
1669 +#define IPQESS_TX_CVLAN 16
1670 +#define IPQESS_TX_INS_CVLAN 17
1671 +#define IPQESS_TX_CVLAN_TAG_SHIFT 0
1672 +
1673 +#define IPQESS_TX_SVLAN 14
1674 +#define IPQESS_TX_INS_SVLAN 15
1675 +#define IPQESS_TX_SVLAN_TAG_SHIFT 16
1676 +
1677 +/* Tx Queue Packet Statistic Register */
1678 +#define IPQESS_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
1679 +
1680 +#define IPQESS_TX_STAT_PKT_MASK 0xFFFFFF
1681 +
1682 +/* Tx Queue Byte Statistic Register */
1683 +#define IPQESS_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
1684 +
1685 +/* Load Balance Based Ring Offset Register */
1686 +#define IPQESS_REG_LB_RING 0x800
1687 +#define IPQESS_LB_RING_ENTRY_MASK 0xff
1688 +#define IPQESS_LB_RING_ID_MASK 0x7
1689 +#define IPQESS_LB_RING_PROFILE_ID_MASK 0x3
1690 +#define IPQESS_LB_RING_ENTRY_BIT_OFFSET 8
1691 +#define IPQESS_LB_RING_ID_OFFSET 0
1692 +#define IPQESS_LB_RING_PROFILE_ID_OFFSET 3
1693 +#define IPQESS_LB_REG_VALUE 0x6040200
1694 +
1695 +/* Load Balance Priority Mapping Register */
1696 +#define IPQESS_REG_LB_PRI_START 0x804
1697 +#define IPQESS_REG_LB_PRI_END 0x810
1698 +#define IPQESS_LB_PRI_REG_INC 4
1699 +#define IPQESS_LB_PRI_ENTRY_BIT_OFFSET 4
1700 +#define IPQESS_LB_PRI_ENTRY_MASK 0xf
1701 +
1702 +/* RSS Priority Mapping Register */
1703 +#define IPQESS_REG_RSS_PRI 0x820
1704 +#define IPQESS_RSS_PRI_ENTRY_MASK 0xf
1705 +#define IPQESS_RSS_RING_ID_MASK 0x7
1706 +#define IPQESS_RSS_PRI_ENTRY_BIT_OFFSET 4
1707 +
1708 +/* RSS Indirection Register */
1709 +#define IPQESS_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
1710 +#define IPQESS_NUM_IDT 16
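+/* Each nibble of the IDT value selects one of the RX hardware queues
+ * 0, 2, 4 or 6
+ */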
1711 +#define IPQESS_RSS_IDT_VALUE 0x64206420
1712 +
1713 +/* Default RSS Ring Register */
1714 +#define IPQESS_REG_DEF_RSS 0x890
1715 +#define IPQESS_DEF_RSS_MASK 0x7
1716 +
1717 +/* RSS Hash Function Type Register */
1718 +#define IPQESS_REG_RSS_TYPE 0x894
1719 +#define IPQESS_RSS_TYPE_NONE 0x01
1720 +#define IPQESS_RSS_TYPE_IPV4TCP 0x02
1721 +#define IPQESS_RSS_TYPE_IPV6_TCP 0x04
1722 +#define IPQESS_RSS_TYPE_IPV4_UDP 0x08
1723 +#define IPQESS_RSS_TYPE_IPV6UDP 0x10
1724 +#define IPQESS_RSS_TYPE_IPV4 0x20
1725 +#define IPQESS_RSS_TYPE_IPV6 0x40
1726 +#define IPQESS_RSS_HASH_MODE_MASK 0x7f
1727 +
1728 +#define IPQESS_REG_RSS_HASH_VALUE 0x8C0
1729 +
1730 +#define IPQESS_REG_RSS_TYPE_RESULT 0x8C4
1731 +
1732 +#define IPQESS_HASH_TYPE_START 0
1733 +#define IPQESS_HASH_TYPE_END 5
1734 +#define IPQESS_HASH_TYPE_SHIFT 12
1735 +
1736 +#define IPQESS_RFS_FLOW_ENTRIES 1024
1737 +#define IPQESS_RFS_FLOW_ENTRIES_MASK (IPQESS_RFS_FLOW_ENTRIES - 1)
1738 +#define IPQESS_RFS_EXPIRE_COUNT_PER_CALL 128
1739 +
1740 +/* RFD Base Address Register */
1741 +#define IPQESS_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
1742 +
1743 +/* RFD Index Register */
1744 +#define IPQESS_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2)) /* x = queue id */
1745 +
1746 +#define IPQESS_RFD_PROD_IDX_BITS 0x00000FFF
1747 +#define IPQESS_RFD_CONS_IDX_BITS 0x0FFF0000
1748 +#define IPQESS_RFD_PROD_IDX_MASK 0xFFF
1749 +#define IPQESS_RFD_CONS_IDX_MASK 0xFFF
1750 +#define IPQESS_RFD_PROD_IDX_SHIFT 0
1751 +#define IPQESS_RFD_CONS_IDX_SHIFT 16
1752 +
1753 +/* Rx Descriptor Control Register */
1754 +#define IPQESS_REG_RX_DESC0 0xA10
1755 +#define IPQESS_RFD_RING_SIZE_MASK 0xFFF
1756 +#define IPQESS_RX_BUF_SIZE_MASK 0xFFFF
1757 +#define IPQESS_RFD_RING_SIZE_SHIFT 0
1758 +#define IPQESS_RX_BUF_SIZE_SHIFT 16
1759 +
1760 +#define IPQESS_REG_RX_DESC1 0xA14
1761 +#define IPQESS_RXQ_RFD_BURST_NUM_MASK 0x3F
1762 +#define IPQESS_RXQ_RFD_PF_THRESH_MASK 0x1F
1763 +#define IPQESS_RXQ_RFD_LOW_THRESH_MASK 0xFFF
1764 +#define IPQESS_RXQ_RFD_BURST_NUM_SHIFT 0
1765 +#define IPQESS_RXQ_RFD_PF_THRESH_SHIFT 8
1766 +#define IPQESS_RXQ_RFD_LOW_THRESH_SHIFT 16
1767 +
1768 +/* RXQ Control Register */
1769 +#define IPQESS_REG_RXQ_CTRL 0xA18
1770 +#define IPQESS_FIFO_THRESH_TYPE_SHIFT 0
1771 +#define IPQESS_FIFO_THRESH_128_BYTE 0x0
1772 +#define IPQESS_FIFO_THRESH_64_BYTE 0x1
1773 +#define IPQESS_RXQ_CTRL_RMV_VLAN 0x00000002
1774 +#define IPQESS_RXQ_CTRL_EN_MASK GENMASK(15, 8)
1775 +#define IPQESS_RXQ_CTRL_EN(__qid) BIT(8 + (__qid))
1776 +
1777 +/* AXI Burst Size Config */
1778 +#define IPQESS_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
1779 +#define IPQESS_AXIW_MAXWRSIZE_VALUE 0x0
1780 +
1781 +/* Rx Statistics Register */
1782 +#define IPQESS_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
1783 +#define IPQESS_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
1784 +
1785 +/* WoL Pattern Length Register */
1786 +#define IPQESS_REG_WOL_PATTERN_LEN0 0xC00
1787 +#define IPQESS_WOL_PT_LEN_MASK 0xFF
1788 +#define IPQESS_WOL_PT0_LEN_SHIFT 0
1789 +#define IPQESS_WOL_PT1_LEN_SHIFT 8
1790 +#define IPQESS_WOL_PT2_LEN_SHIFT 16
1791 +#define IPQESS_WOL_PT3_LEN_SHIFT 24
1792 +
1793 +#define IPQESS_REG_WOL_PATTERN_LEN1 0xC04
1794 +#define IPQESS_WOL_PT4_LEN_SHIFT 0
1795 +#define IPQESS_WOL_PT5_LEN_SHIFT 8
1796 +#define IPQESS_WOL_PT6_LEN_SHIFT 16
1797 +
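/* Illustration (not part of the patch): hedged sketch of packing the
 * first four WoL pattern lengths, one byte each, into
 * IPQESS_REG_WOL_PATTERN_LEN0; the array and helper are ours.
 */
static inline u32 ipqess_example_wol_len0(const u8 len[4])
{
	return (((u32)len[0] & IPQESS_WOL_PT_LEN_MASK) << IPQESS_WOL_PT0_LEN_SHIFT) |
	       (((u32)len[1] & IPQESS_WOL_PT_LEN_MASK) << IPQESS_WOL_PT1_LEN_SHIFT) |
	       (((u32)len[2] & IPQESS_WOL_PT_LEN_MASK) << IPQESS_WOL_PT2_LEN_SHIFT) |
	       (((u32)len[3] & IPQESS_WOL_PT_LEN_MASK) << IPQESS_WOL_PT3_LEN_SHIFT);
}
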
1798 +/* WoL Control Register */
1799 +#define IPQESS_REG_WOL_CTRL 0xC08
1800 +#define IPQESS_WOL_WK_EN 0x00000001
1801 +#define IPQESS_WOL_MG_EN 0x00000002
1802 +#define IPQESS_WOL_PT0_EN 0x00000004
1803 +#define IPQESS_WOL_PT1_EN 0x00000008
1804 +#define IPQESS_WOL_PT2_EN 0x00000010
1805 +#define IPQESS_WOL_PT3_EN 0x00000020
1806 +#define IPQESS_WOL_PT4_EN 0x00000040
1807 +#define IPQESS_WOL_PT5_EN 0x00000080
1808 +#define IPQESS_WOL_PT6_EN 0x00000100
1809 +
1810 +/* MAC Control Register */
1811 +#define IPQESS_REG_MAC_CTRL0 0xC20
1812 +#define IPQESS_REG_MAC_CTRL1 0xC24
1813 +
1814 +/* WoL Pattern Register */
1815 +#define IPQESS_REG_WOL_PATTERN_START 0x5000
1816 +#define IPQESS_PATTERN_PART_REG_OFFSET 0x40
1817 +
1818 +/* TX descriptor fields */
1819 +#define IPQESS_TPD_HDR_SHIFT 0
1820 +#define IPQESS_TPD_PPPOE_EN 0x00000100
1821 +#define IPQESS_TPD_IP_CSUM_EN 0x00000200
1822 +#define IPQESS_TPD_TCP_CSUM_EN 0x00000400
1823 +#define IPQESS_TPD_UDP_CSUM_EN 0x00000800
1824 +#define IPQESS_TPD_CUSTOM_CSUM_EN 0x00000C00
1825 +#define IPQESS_TPD_LSO_EN 0x00001000
1826 +#define IPQESS_TPD_LSO_V2_EN 0x00002000
1827 +/* The VLAN_TAGGED bit is not used in the publicly available
1828 + * drivers. The definition is borrowed from the Atheros
1829 + * 'alx' driver (drivers/net/ethernet/atheros/alx/hw.h); it
1830 + * appears to have the same meaning with regard to the EDMA
1831 + * hardware.
1832 + */
1833 +#define IPQESS_TPD_VLAN_TAGGED 0x00004000
1834 +#define IPQESS_TPD_IPV4_EN 0x00010000
1835 +#define IPQESS_TPD_MSS_MASK 0x1FFF
1836 +#define IPQESS_TPD_MSS_SHIFT 18
1837 +#define IPQESS_TPD_CUSTOM_CSUM_SHIFT 18
1838 +
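/* Illustration (not part of the patch): hedged sketch of the TPD offload
 * word for a TSO frame, placing the MSS in bits 30:18 next to the LSO
 * enable; for plain checksum offload the IP/TCP/UDP_CSUM_EN bits would
 * be set instead. Which 32-bit TPD word this occupies is fixed by the
 * descriptor layout earlier in the patch. Helper name is ours.
 */
static inline u32 ipqess_example_tpd_tso(u16 mss)
{
	return IPQESS_TPD_LSO_EN |
	       (((u32)mss & IPQESS_TPD_MSS_MASK) << IPQESS_TPD_MSS_SHIFT);
}
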
1839 +/* RRD descriptor fields */
1840 +#define IPQESS_RRD_NUM_RFD_MASK 0x000F
1841 +#define IPQESS_RRD_PKT_SIZE_MASK 0x3FFF
1842 +#define IPQESS_RRD_SRC_PORT_NUM_MASK 0x4000
1843 +#define IPQESS_RRD_SVLAN 0x8000
1844 +#define IPQESS_RRD_FLOW_COOKIE_MASK 0x07FF
1845 +
1847 +#define IPQESS_RRD_CSUM_FAIL_MASK 0xC000
1848 +#define IPQESS_RRD_CVLAN 0x0001
1849 +#define IPQESS_RRD_DESC_VALID 0x8000
1850 +
1851 +#define IPQESS_RRD_PRIORITY_SHIFT 4
1852 +#define IPQESS_RRD_PRIORITY_MASK 0x7
1853 +#define IPQESS_RRD_PORT_TYPE_SHIFT 7
1854 +#define IPQESS_RRD_PORT_TYPE_MASK 0x1F
1855 +
1856 +#define IPQESS_RRD_PORT_ID_MASK 0x7000
1857 +
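/* Illustration (not part of the patch): hedged sketch of consuming RRD
 * fields on receive. Which 16-bit RRD word each mask applies to is fixed
 * by the descriptor layout earlier in the patch, so "size_word" and
 * "status_word" here are stand-ins.
 */
static inline u16 ipqess_example_rrd_len(u16 size_word)
{
	return size_word & IPQESS_RRD_PKT_SIZE_MASK;
}

static inline bool ipqess_example_rrd_csum_ok(u16 status_word)
{
	return !(status_word & IPQESS_RRD_CSUM_FAIL_MASK);
}
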
1858 +#endif
1859 --- /dev/null
1860 +++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess_ethtool.c
1861 @@ -0,0 +1,164 @@
1862 +// SPDX-License-Identifier: GPL-2.0 OR ISC
1863 +/* Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
1864 + * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
1865 + * Copyright (c) 2021 - 2022, Maxime Chevallier <maxime.chevallier@bootlin.com>
1866 + *
1867 + */
1868 +
1869 +#include <linux/ethtool.h>
1870 +#include <linux/netdevice.h>
1871 +#include <linux/string.h>
1872 +#include <linux/phylink.h>
1873 +
1874 +#include "ipqess.h"
1875 +
1876 +struct ipqess_ethtool_stats {
1877 + u8 string[ETH_GSTRING_LEN];
1878 + u32 offset;
1879 +};
1880 +
1881 +#define IPQESS_STAT(m) offsetof(struct ipqess_statistics, m)
1882 +#define DRVINFO_LEN 32
1883 +
1884 +static const struct ipqess_ethtool_stats ipqess_stats[] = {
1885 + {"tx_q0_pkt", IPQESS_STAT(tx_q0_pkt)},
1886 + {"tx_q1_pkt", IPQESS_STAT(tx_q1_pkt)},
1887 + {"tx_q2_pkt", IPQESS_STAT(tx_q2_pkt)},
1888 + {"tx_q3_pkt", IPQESS_STAT(tx_q3_pkt)},
1889 + {"tx_q4_pkt", IPQESS_STAT(tx_q4_pkt)},
1890 + {"tx_q5_pkt", IPQESS_STAT(tx_q5_pkt)},
1891 + {"tx_q6_pkt", IPQESS_STAT(tx_q6_pkt)},
1892 + {"tx_q7_pkt", IPQESS_STAT(tx_q7_pkt)},
1893 + {"tx_q8_pkt", IPQESS_STAT(tx_q8_pkt)},
1894 + {"tx_q9_pkt", IPQESS_STAT(tx_q9_pkt)},
1895 + {"tx_q10_pkt", IPQESS_STAT(tx_q10_pkt)},
1896 + {"tx_q11_pkt", IPQESS_STAT(tx_q11_pkt)},
1897 + {"tx_q12_pkt", IPQESS_STAT(tx_q12_pkt)},
1898 + {"tx_q13_pkt", IPQESS_STAT(tx_q13_pkt)},
1899 + {"tx_q14_pkt", IPQESS_STAT(tx_q14_pkt)},
1900 + {"tx_q15_pkt", IPQESS_STAT(tx_q15_pkt)},
1901 + {"tx_q0_byte", IPQESS_STAT(tx_q0_byte)},
1902 + {"tx_q1_byte", IPQESS_STAT(tx_q1_byte)},
1903 + {"tx_q2_byte", IPQESS_STAT(tx_q2_byte)},
1904 + {"tx_q3_byte", IPQESS_STAT(tx_q3_byte)},
1905 + {"tx_q4_byte", IPQESS_STAT(tx_q4_byte)},
1906 + {"tx_q5_byte", IPQESS_STAT(tx_q5_byte)},
1907 + {"tx_q6_byte", IPQESS_STAT(tx_q6_byte)},
1908 + {"tx_q7_byte", IPQESS_STAT(tx_q7_byte)},
1909 + {"tx_q8_byte", IPQESS_STAT(tx_q8_byte)},
1910 + {"tx_q9_byte", IPQESS_STAT(tx_q9_byte)},
1911 + {"tx_q10_byte", IPQESS_STAT(tx_q10_byte)},
1912 + {"tx_q11_byte", IPQESS_STAT(tx_q11_byte)},
1913 + {"tx_q12_byte", IPQESS_STAT(tx_q12_byte)},
1914 + {"tx_q13_byte", IPQESS_STAT(tx_q13_byte)},
1915 + {"tx_q14_byte", IPQESS_STAT(tx_q14_byte)},
1916 + {"tx_q15_byte", IPQESS_STAT(tx_q15_byte)},
1917 + {"rx_q0_pkt", IPQESS_STAT(rx_q0_pkt)},
1918 + {"rx_q1_pkt", IPQESS_STAT(rx_q1_pkt)},
1919 + {"rx_q2_pkt", IPQESS_STAT(rx_q2_pkt)},
1920 + {"rx_q3_pkt", IPQESS_STAT(rx_q3_pkt)},
1921 + {"rx_q4_pkt", IPQESS_STAT(rx_q4_pkt)},
1922 + {"rx_q5_pkt", IPQESS_STAT(rx_q5_pkt)},
1923 + {"rx_q6_pkt", IPQESS_STAT(rx_q6_pkt)},
1924 + {"rx_q7_pkt", IPQESS_STAT(rx_q7_pkt)},
1925 + {"rx_q0_byte", IPQESS_STAT(rx_q0_byte)},
1926 + {"rx_q1_byte", IPQESS_STAT(rx_q1_byte)},
1927 + {"rx_q2_byte", IPQESS_STAT(rx_q2_byte)},
1928 + {"rx_q3_byte", IPQESS_STAT(rx_q3_byte)},
1929 + {"rx_q4_byte", IPQESS_STAT(rx_q4_byte)},
1930 + {"rx_q5_byte", IPQESS_STAT(rx_q5_byte)},
1931 + {"rx_q6_byte", IPQESS_STAT(rx_q6_byte)},
1932 + {"rx_q7_byte", IPQESS_STAT(rx_q7_byte)},
1933 + {"tx_desc_error", IPQESS_STAT(tx_desc_error)},
1934 +};
1935 +
1936 +static int ipqess_get_strset_count(struct net_device *netdev, int sset)
1937 +{
1938 + switch (sset) {
1939 + case ETH_SS_STATS:
1940 + return ARRAY_SIZE(ipqess_stats);
1941 + default:
1942 + netdev_dbg(netdev, "%s: Unsupported string set\n", __func__);
1943 + return -EOPNOTSUPP;
1944 + }
1945 +}
1946 +
1947 +static void ipqess_get_strings(struct net_device *netdev, u32 stringset,
1948 + u8 *data)
1949 +{
1950 + u8 *p = data;
1951 + u32 i;
1952 +
1953 + switch (stringset) {
1954 + case ETH_SS_STATS:
1955 + for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++)
1956 + ethtool_puts(&p, ipqess_stats[i].string);
1957 + break;
1958 + }
1959 +}
1960 +
1961 +static void ipqess_get_ethtool_stats(struct net_device *netdev,
1962 + struct ethtool_stats *stats,
1963 + uint64_t *data)
1964 +{
1965 + struct ipqess *ess = netdev_priv(netdev);
1966 + u32 *essstats = (u32 *)&ess->ipqess_stats;
1967 + int i;
1968 +
1969 + spin_lock(&ess->stats_lock);
1970 +
1971 + ipqess_update_hw_stats(ess);
1972 +
1973 + for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++)
1974 + data[i] = essstats[ipqess_stats[i].offset / sizeof(u32)];
1975 +
1976 + spin_unlock(&ess->stats_lock);
1977 +}
1978 +
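/* Illustration (not part of the patch): the stats loop above works
 * because struct ipqess_statistics is assumed to be a flat sequence of
 * u32 counters laid out in declaration order; IPQESS_STAT() records each
 * field's byte offset, which the loop converts into an index into the
 * struct viewed as a u32 array. A hedged compile-time check of that
 * assumption:
 */
static inline void ipqess_example_stats_layout(void)
{
	BUILD_BUG_ON(IPQESS_STAT(tx_q0_pkt) != 0);
	BUILD_BUG_ON(IPQESS_STAT(tx_q1_pkt) != sizeof(u32));
}
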
1979 +static void ipqess_get_drvinfo(struct net_device *dev,
1980 + struct ethtool_drvinfo *info)
1981 +{
1982 + strscpy(info->driver, "qca_ipqess", DRVINFO_LEN);
1983 + strscpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
1984 +}
1985 +
1986 +static int ipqess_get_link_ksettings(struct net_device *netdev,
1987 + struct ethtool_link_ksettings *cmd)
1988 +{
1989 + struct ipqess *ess = netdev_priv(netdev);
1990 +
1991 + return phylink_ethtool_ksettings_get(ess->phylink, cmd);
1992 +}
1993 +
1994 +static int ipqess_set_link_ksettings(struct net_device *netdev,
1995 + const struct ethtool_link_ksettings *cmd)
1996 +{
1997 + struct ipqess *ess = netdev_priv(netdev);
1998 +
1999 + return phylink_ethtool_ksettings_set(ess->phylink, cmd);
2000 +}
2001 +
2002 +static void ipqess_get_ringparam(struct net_device *netdev,
2003 + struct ethtool_ringparam *ring,
2004 + struct kernel_ethtool_ringparam *kernel_ering,
2005 + struct netlink_ext_ack *extack)
2006 +{
2007 + ring->tx_max_pending = IPQESS_TX_RING_SIZE;
2008 + ring->rx_max_pending = IPQESS_RX_RING_SIZE;
2009 +}
2010 +
2011 +static const struct ethtool_ops ipqesstool_ops = {
2012 + .get_drvinfo = ipqess_get_drvinfo,
2013 + .get_link = ethtool_op_get_link,
2014 + .get_link_ksettings = ipqess_get_link_ksettings,
2015 + .set_link_ksettings = ipqess_set_link_ksettings,
2016 + .get_strings = ipqess_get_strings,
2017 + .get_sset_count = ipqess_get_strset_count,
2018 + .get_ethtool_stats = ipqess_get_ethtool_stats,
2019 + .get_ringparam = ipqess_get_ringparam,
2020 +};
2021 +
2022 +void ipqess_set_ethtool_ops(struct net_device *netdev)
2023 +{
2024 + netdev->ethtool_ops = &ipqesstool_ops;
2025 +}