target/linux/generic/backport-5.15/771-v6.0-03-net-dsa-qca8k-move-mib-struct-to-common-code.patch
1 From 027152b830434e3632ad5dd678cc5d4740358dbb Mon Sep 17 00:00:00 2001
2 From: Christian Marangi <ansuelsmth@gmail.com>
3 Date: Wed, 27 Jul 2022 13:35:12 +0200
4 Subject: [PATCH 03/14] net: dsa: qca8k: move mib struct to common code
5
6 The same MIB struct is used by the drivers based on the qca8k family of
7 switches. Move it to common code to make it also accessible to other drivers.
8
9 Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
10 Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
11 Signed-off-by: Jakub Kicinski <kuba@kernel.org>
12 ---
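Note: below is a minimal, self-contained sketch of the descriptor-table pattern
this patch centralizes, i.e. one MIB table shared by the qca8k drivers. The
MIB_DESC()/ar8327_mib names mirror the hunks further down; the struct field
layout is inferred from the MIB_DESC() initializer and the print loop is
illustrative only, not code taken from the kernel.

/* Standalone model of the shared MIB descriptor table; the field layout
 * is inferred from the MIB_DESC() initializer in the diff below.
 */
#include <stddef.h>
#include <stdio.h>

struct qca8k_mib_desc {
	unsigned int size;	/* counter width, in 32-bit words */
	unsigned int offset;	/* offset inside the per-port MIB block */
	const char *name;	/* ethtool statistics string */
};

#define MIB_DESC(_s, _o, _n) { .size = (_s), .offset = (_o), .name = (_n) }

/* A few representative entries from the table moved below */
static const struct qca8k_mib_desc ar8327_mib[] = {
	MIB_DESC(1, 0x00, "RxBroad"),
	MIB_DESC(2, 0x3c, "RxGoodByte"),
	MIB_DESC(1, 0xac, "TXUnicast"),
};

int main(void)
{
	size_t i;

	/* A driver sharing the table only needs its address and length,
	 * e.g. through an extern declaration in a common header.
	 */
	for (i = 0; i < sizeof(ar8327_mib) / sizeof(ar8327_mib[0]); i++)
		printf("%-12s size=%u offset=0x%02x\n", ar8327_mib[i].name,
		       ar8327_mib[i].size, ar8327_mib[i].offset);

	return 0;
}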
13 drivers/net/dsa/qca/Makefile | 1 +
14 drivers/net/dsa/qca/{qca8k.c => qca8k-8xxx.c} | 51 ---------------
15 drivers/net/dsa/qca/qca8k-common.c | 63 +++++++++++++++++++
16 drivers/net/dsa/qca/qca8k.h | 3 +
17 4 files changed, 67 insertions(+), 51 deletions(-)
18 rename drivers/net/dsa/qca/{qca8k.c => qca8k-8xxx.c} (98%)
19 create mode 100644 drivers/net/dsa/qca/qca8k-common.c
20
21 --- a/drivers/net/dsa/qca/Makefile
22 +++ b/drivers/net/dsa/qca/Makefile
23 @@ -1,3 +1,4 @@
24 # SPDX-License-Identifier: GPL-2.0-only
25 obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
26 obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
27 +qca8k-y += qca8k-common.o qca8k-8xxx.o
28 --- a/drivers/net/dsa/qca/qca8k.c
29 +++ /dev/null
30 @@ -1,3237 +0,0 @@
31 -// SPDX-License-Identifier: GPL-2.0
32 -/*
33 - * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
34 - * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
35 - * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
36 - * Copyright (c) 2016 John Crispin <john@phrozen.org>
37 - */
38 -
39 -#include <linux/module.h>
40 -#include <linux/phy.h>
41 -#include <linux/netdevice.h>
42 -#include <linux/bitfield.h>
43 -#include <linux/regmap.h>
44 -#include <net/dsa.h>
45 -#include <linux/of_net.h>
46 -#include <linux/of_mdio.h>
47 -#include <linux/of_platform.h>
48 -#include <linux/if_bridge.h>
49 -#include <linux/mdio.h>
50 -#include <linux/phylink.h>
51 -#include <linux/gpio/consumer.h>
52 -#include <linux/etherdevice.h>
53 -#include <linux/dsa/tag_qca.h>
54 -
55 -#include "qca8k.h"
56 -
57 -#define MIB_DESC(_s, _o, _n) \
58 - { \
59 - .size = (_s), \
60 - .offset = (_o), \
61 - .name = (_n), \
62 - }
63 -
64 -static const struct qca8k_mib_desc ar8327_mib[] = {
65 - MIB_DESC(1, 0x00, "RxBroad"),
66 - MIB_DESC(1, 0x04, "RxPause"),
67 - MIB_DESC(1, 0x08, "RxMulti"),
68 - MIB_DESC(1, 0x0c, "RxFcsErr"),
69 - MIB_DESC(1, 0x10, "RxAlignErr"),
70 - MIB_DESC(1, 0x14, "RxRunt"),
71 - MIB_DESC(1, 0x18, "RxFragment"),
72 - MIB_DESC(1, 0x1c, "Rx64Byte"),
73 - MIB_DESC(1, 0x20, "Rx128Byte"),
74 - MIB_DESC(1, 0x24, "Rx256Byte"),
75 - MIB_DESC(1, 0x28, "Rx512Byte"),
76 - MIB_DESC(1, 0x2c, "Rx1024Byte"),
77 - MIB_DESC(1, 0x30, "Rx1518Byte"),
78 - MIB_DESC(1, 0x34, "RxMaxByte"),
79 - MIB_DESC(1, 0x38, "RxTooLong"),
80 - MIB_DESC(2, 0x3c, "RxGoodByte"),
81 - MIB_DESC(2, 0x44, "RxBadByte"),
82 - MIB_DESC(1, 0x4c, "RxOverFlow"),
83 - MIB_DESC(1, 0x50, "Filtered"),
84 - MIB_DESC(1, 0x54, "TxBroad"),
85 - MIB_DESC(1, 0x58, "TxPause"),
86 - MIB_DESC(1, 0x5c, "TxMulti"),
87 - MIB_DESC(1, 0x60, "TxUnderRun"),
88 - MIB_DESC(1, 0x64, "Tx64Byte"),
89 - MIB_DESC(1, 0x68, "Tx128Byte"),
90 - MIB_DESC(1, 0x6c, "Tx256Byte"),
91 - MIB_DESC(1, 0x70, "Tx512Byte"),
92 - MIB_DESC(1, 0x74, "Tx1024Byte"),
93 - MIB_DESC(1, 0x78, "Tx1518Byte"),
94 - MIB_DESC(1, 0x7c, "TxMaxByte"),
95 - MIB_DESC(1, 0x80, "TxOverSize"),
96 - MIB_DESC(2, 0x84, "TxByte"),
97 - MIB_DESC(1, 0x8c, "TxCollision"),
98 - MIB_DESC(1, 0x90, "TxAbortCol"),
99 - MIB_DESC(1, 0x94, "TxMultiCol"),
100 - MIB_DESC(1, 0x98, "TxSingleCol"),
101 - MIB_DESC(1, 0x9c, "TxExcDefer"),
102 - MIB_DESC(1, 0xa0, "TxDefer"),
103 - MIB_DESC(1, 0xa4, "TxLateCol"),
104 - MIB_DESC(1, 0xa8, "RXUnicast"),
105 - MIB_DESC(1, 0xac, "TXUnicast"),
106 -};
107 -
108 -static void
109 -qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
110 -{
111 - regaddr >>= 1;
112 - *r1 = regaddr & 0x1e;
113 -
114 - regaddr >>= 5;
115 - *r2 = regaddr & 0x7;
116 -
117 - regaddr >>= 3;
118 - *page = regaddr & 0x3ff;
119 -}
120 -
121 -static int
122 -qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
123 -{
124 - u16 *cached_lo = &priv->mdio_cache.lo;
125 - struct mii_bus *bus = priv->bus;
126 - int ret;
127 -
128 - if (lo == *cached_lo)
129 - return 0;
130 -
131 - ret = bus->write(bus, phy_id, regnum, lo);
132 - if (ret < 0)
133 - dev_err_ratelimited(&bus->dev,
134 - "failed to write qca8k 32bit lo register\n");
135 -
136 - *cached_lo = lo;
137 - return 0;
138 -}
139 -
140 -static int
141 -qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
142 -{
143 - u16 *cached_hi = &priv->mdio_cache.hi;
144 - struct mii_bus *bus = priv->bus;
145 - int ret;
146 -
147 - if (hi == *cached_hi)
148 - return 0;
149 -
150 - ret = bus->write(bus, phy_id, regnum, hi);
151 - if (ret < 0)
152 - dev_err_ratelimited(&bus->dev,
153 - "failed to write qca8k 32bit hi register\n");
154 -
155 - *cached_hi = hi;
156 - return 0;
157 -}
158 -
159 -static int
160 -qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
161 -{
162 - int ret;
163 -
164 - ret = bus->read(bus, phy_id, regnum);
165 - if (ret >= 0) {
166 - *val = ret;
167 - ret = bus->read(bus, phy_id, regnum + 1);
168 - *val |= ret << 16;
169 - }
170 -
171 - if (ret < 0) {
172 - dev_err_ratelimited(&bus->dev,
173 - "failed to read qca8k 32bit register\n");
174 - *val = 0;
175 - return ret;
176 - }
177 -
178 - return 0;
179 -}
180 -
181 -static void
182 -qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
183 -{
184 - u16 lo, hi;
185 - int ret;
186 -
187 - lo = val & 0xffff;
188 - hi = (u16)(val >> 16);
189 -
190 - ret = qca8k_set_lo(priv, phy_id, regnum, lo);
191 - if (ret >= 0)
192 - ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
193 -}
194 -
195 -static int
196 -qca8k_set_page(struct qca8k_priv *priv, u16 page)
197 -{
198 - u16 *cached_page = &priv->mdio_cache.page;
199 - struct mii_bus *bus = priv->bus;
200 - int ret;
201 -
202 - if (page == *cached_page)
203 - return 0;
204 -
205 - ret = bus->write(bus, 0x18, 0, page);
206 - if (ret < 0) {
207 - dev_err_ratelimited(&bus->dev,
208 - "failed to set qca8k page\n");
209 - return ret;
210 - }
211 -
212 - *cached_page = page;
213 - usleep_range(1000, 2000);
214 - return 0;
215 -}
216 -
217 -static int
218 -qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
219 -{
220 - return regmap_read(priv->regmap, reg, val);
221 -}
222 -
223 -static int
224 -qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
225 -{
226 - return regmap_write(priv->regmap, reg, val);
227 -}
228 -
229 -static int
230 -qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
231 -{
232 - return regmap_update_bits(priv->regmap, reg, mask, write_val);
233 -}
234 -
235 -static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
236 -{
237 - struct qca8k_mgmt_eth_data *mgmt_eth_data;
238 - struct qca8k_priv *priv = ds->priv;
239 - struct qca_mgmt_ethhdr *mgmt_ethhdr;
240 - u8 len, cmd;
241 -
242 - mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
243 - mgmt_eth_data = &priv->mgmt_eth_data;
244 -
245 - cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
246 - len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
247 -
248 - /* Make sure the seq match the requested packet */
249 - if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
250 - mgmt_eth_data->ack = true;
251 -
252 - if (cmd == MDIO_READ) {
253 - mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
254 -
255 - /* Get the rest of the 12 byte of data.
256 - * The read/write function will extract the requested data.
257 - */
258 - if (len > QCA_HDR_MGMT_DATA1_LEN)
259 - memcpy(mgmt_eth_data->data + 1, skb->data,
260 - QCA_HDR_MGMT_DATA2_LEN);
261 - }
262 -
263 - complete(&mgmt_eth_data->rw_done);
264 -}
265 -
266 -static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
267 - int priority, unsigned int len)
268 -{
269 - struct qca_mgmt_ethhdr *mgmt_ethhdr;
270 - unsigned int real_len;
271 - struct sk_buff *skb;
272 - u32 *data2;
273 - u16 hdr;
274 -
275 - skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
276 - if (!skb)
277 - return NULL;
278 -
279 - /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
280 - * Actually for some reason the steps are:
281 - * 0: nothing
282 - * 1-4: first 4 byte
283 - * 5-6: first 12 byte
284 - * 7-15: all 16 byte
285 - */
286 - if (len == 16)
287 - real_len = 15;
288 - else
289 - real_len = len;
290 -
291 - skb_reset_mac_header(skb);
292 - skb_set_network_header(skb, skb->len);
293 -
294 - mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
295 -
296 - hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
297 - hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
298 - hdr |= QCA_HDR_XMIT_FROM_CPU;
299 - hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
300 - hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
301 -
302 - mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
303 - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
304 - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
305 - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
306 - QCA_HDR_MGMT_CHECK_CODE_VAL);
307 -
308 - if (cmd == MDIO_WRITE)
309 - mgmt_ethhdr->mdio_data = *val;
310 -
311 - mgmt_ethhdr->hdr = htons(hdr);
312 -
313 - data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
314 - if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
315 - memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
316 -
317 - return skb;
318 -}
319 -
320 -static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
321 -{
322 - struct qca_mgmt_ethhdr *mgmt_ethhdr;
323 -
324 - mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
325 - mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
326 -}
327 -
328 -static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
329 -{
330 - struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
331 - struct sk_buff *skb;
332 - bool ack;
333 - int ret;
334 -
335 - skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
336 - QCA8K_ETHERNET_MDIO_PRIORITY, len);
337 - if (!skb)
338 - return -ENOMEM;
339 -
340 - mutex_lock(&mgmt_eth_data->mutex);
341 -
342 - /* Check mgmt_master if is operational */
343 - if (!priv->mgmt_master) {
344 - kfree_skb(skb);
345 - mutex_unlock(&mgmt_eth_data->mutex);
346 - return -EINVAL;
347 - }
348 -
349 - skb->dev = priv->mgmt_master;
350 -
351 - reinit_completion(&mgmt_eth_data->rw_done);
352 -
353 - /* Increment seq_num and set it in the mdio pkt */
354 - mgmt_eth_data->seq++;
355 - qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
356 - mgmt_eth_data->ack = false;
357 -
358 - dev_queue_xmit(skb);
359 -
360 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
361 - msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
362 -
363 - *val = mgmt_eth_data->data[0];
364 - if (len > QCA_HDR_MGMT_DATA1_LEN)
365 - memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
366 -
367 - ack = mgmt_eth_data->ack;
368 -
369 - mutex_unlock(&mgmt_eth_data->mutex);
370 -
371 - if (ret <= 0)
372 - return -ETIMEDOUT;
373 -
374 - if (!ack)
375 - return -EINVAL;
376 -
377 - return 0;
378 -}
379 -
380 -static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
381 -{
382 - struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
383 - struct sk_buff *skb;
384 - bool ack;
385 - int ret;
386 -
387 - skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
388 - QCA8K_ETHERNET_MDIO_PRIORITY, len);
389 - if (!skb)
390 - return -ENOMEM;
391 -
392 - mutex_lock(&mgmt_eth_data->mutex);
393 -
394 - /* Check mgmt_master if is operational */
395 - if (!priv->mgmt_master) {
396 - kfree_skb(skb);
397 - mutex_unlock(&mgmt_eth_data->mutex);
398 - return -EINVAL;
399 - }
400 -
401 - skb->dev = priv->mgmt_master;
402 -
403 - reinit_completion(&mgmt_eth_data->rw_done);
404 -
405 - /* Increment seq_num and set it in the mdio pkt */
406 - mgmt_eth_data->seq++;
407 - qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
408 - mgmt_eth_data->ack = false;
409 -
410 - dev_queue_xmit(skb);
411 -
412 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
413 - msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
414 -
415 - ack = mgmt_eth_data->ack;
416 -
417 - mutex_unlock(&mgmt_eth_data->mutex);
418 -
419 - if (ret <= 0)
420 - return -ETIMEDOUT;
421 -
422 - if (!ack)
423 - return -EINVAL;
424 -
425 - return 0;
426 -}
427 -
428 -static int
429 -qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
430 -{
431 - u32 val = 0;
432 - int ret;
433 -
434 - ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
435 - if (ret)
436 - return ret;
437 -
438 - val &= ~mask;
439 - val |= write_val;
440 -
441 - return qca8k_write_eth(priv, reg, &val, sizeof(val));
442 -}
443 -
444 -static int
445 -qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
446 -{
447 - int i, count = len / sizeof(u32), ret;
448 -
449 - if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
450 - return 0;
451 -
452 - for (i = 0; i < count; i++) {
453 - ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
454 - if (ret < 0)
455 - return ret;
456 - }
457 -
458 - return 0;
459 -}
460 -
461 -static int
462 -qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
463 -{
464 - int i, count = len / sizeof(u32), ret;
465 - u32 tmp;
466 -
467 - if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
468 - return 0;
469 -
470 - for (i = 0; i < count; i++) {
471 - tmp = val[i];
472 -
473 - ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
474 - if (ret < 0)
475 - return ret;
476 - }
477 -
478 - return 0;
479 -}
480 -
481 -static int
482 -qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
483 -{
484 - struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
485 - struct mii_bus *bus = priv->bus;
486 - u16 r1, r2, page;
487 - int ret;
488 -
489 - if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
490 - return 0;
491 -
492 - qca8k_split_addr(reg, &r1, &r2, &page);
493 -
494 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
495 -
496 - ret = qca8k_set_page(priv, page);
497 - if (ret < 0)
498 - goto exit;
499 -
500 - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
501 -
502 -exit:
503 - mutex_unlock(&bus->mdio_lock);
504 - return ret;
505 -}
506 -
507 -static int
508 -qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
509 -{
510 - struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
511 - struct mii_bus *bus = priv->bus;
512 - u16 r1, r2, page;
513 - int ret;
514 -
515 - if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
516 - return 0;
517 -
518 - qca8k_split_addr(reg, &r1, &r2, &page);
519 -
520 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
521 -
522 - ret = qca8k_set_page(priv, page);
523 - if (ret < 0)
524 - goto exit;
525 -
526 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
527 -
528 -exit:
529 - mutex_unlock(&bus->mdio_lock);
530 - return ret;
531 -}
532 -
533 -static int
534 -qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
535 -{
536 - struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
537 - struct mii_bus *bus = priv->bus;
538 - u16 r1, r2, page;
539 - u32 val;
540 - int ret;
541 -
542 - if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
543 - return 0;
544 -
545 - qca8k_split_addr(reg, &r1, &r2, &page);
546 -
547 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
548 -
549 - ret = qca8k_set_page(priv, page);
550 - if (ret < 0)
551 - goto exit;
552 -
553 - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
554 - if (ret < 0)
555 - goto exit;
556 -
557 - val &= ~mask;
558 - val |= write_val;
559 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
560 -
561 -exit:
562 - mutex_unlock(&bus->mdio_lock);
563 -
564 - return ret;
565 -}
566 -
567 -static const struct regmap_range qca8k_readable_ranges[] = {
568 - regmap_reg_range(0x0000, 0x00e4), /* Global control */
569 - regmap_reg_range(0x0100, 0x0168), /* EEE control */
570 - regmap_reg_range(0x0200, 0x0270), /* Parser control */
571 - regmap_reg_range(0x0400, 0x0454), /* ACL */
572 - regmap_reg_range(0x0600, 0x0718), /* Lookup */
573 - regmap_reg_range(0x0800, 0x0b70), /* QM */
574 - regmap_reg_range(0x0c00, 0x0c80), /* PKT */
575 - regmap_reg_range(0x0e00, 0x0e98), /* L3 */
576 - regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
577 - regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
578 - regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
579 - regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
580 - regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
581 - regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
582 - regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
583 -
584 -};
585 -
586 -static const struct regmap_access_table qca8k_readable_table = {
587 - .yes_ranges = qca8k_readable_ranges,
588 - .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
589 -};
590 -
591 -static struct regmap_config qca8k_regmap_config = {
592 - .reg_bits = 16,
593 - .val_bits = 32,
594 - .reg_stride = 4,
595 - .max_register = 0x16ac, /* end MIB - Port6 range */
596 - .reg_read = qca8k_regmap_read,
597 - .reg_write = qca8k_regmap_write,
598 - .reg_update_bits = qca8k_regmap_update_bits,
599 - .rd_table = &qca8k_readable_table,
600 - .disable_locking = true, /* Locking is handled by qca8k read/write */
601 - .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
602 -};
603 -
604 -static int
605 -qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
606 -{
607 - u32 val;
608 -
609 - return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
610 - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
611 -}
612 -
613 -static int
614 -qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
615 -{
616 - u32 reg[3];
617 - int ret;
618 -
619 - /* load the ARL table into an array */
620 - ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
621 - if (ret)
622 - return ret;
623 -
624 - /* vid - 83:72 */
625 - fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
626 - /* aging - 67:64 */
627 - fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
628 - /* portmask - 54:48 */
629 - fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
630 - /* mac - 47:0 */
631 - fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
632 - fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
633 - fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
634 - fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
635 - fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
636 - fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
637 -
638 - return 0;
639 -}
640 -
641 -static void
642 -qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
643 - u8 aging)
644 -{
645 - u32 reg[3] = { 0 };
646 -
647 - /* vid - 83:72 */
648 - reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
649 - /* aging - 67:64 */
650 - reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
651 - /* portmask - 54:48 */
652 - reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
653 - /* mac - 47:0 */
654 - reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
655 - reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
656 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
657 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
658 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
659 - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
660 -
661 - /* load the array into the ARL table */
662 - qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
663 -}
664 -
665 -static int
666 -qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
667 -{
668 - u32 reg;
669 - int ret;
670 -
671 - /* Set the command and FDB index */
672 - reg = QCA8K_ATU_FUNC_BUSY;
673 - reg |= cmd;
674 - if (port >= 0) {
675 - reg |= QCA8K_ATU_FUNC_PORT_EN;
676 - reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
677 - }
678 -
679 - /* Write the function register triggering the table access */
680 - ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
681 - if (ret)
682 - return ret;
683 -
684 - /* wait for completion */
685 - ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
686 - if (ret)
687 - return ret;
688 -
689 - /* Check for table full violation when adding an entry */
690 - if (cmd == QCA8K_FDB_LOAD) {
691 - ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
692 - if (ret < 0)
693 - return ret;
694 - if (reg & QCA8K_ATU_FUNC_FULL)
695 - return -1;
696 - }
697 -
698 - return 0;
699 -}
700 -
701 -static int
702 -qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
703 -{
704 - int ret;
705 -
706 - qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
707 - ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
708 - if (ret < 0)
709 - return ret;
710 -
711 - return qca8k_fdb_read(priv, fdb);
712 -}
713 -
714 -static int
715 -qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
716 - u16 vid, u8 aging)
717 -{
718 - int ret;
719 -
720 - mutex_lock(&priv->reg_mutex);
721 - qca8k_fdb_write(priv, vid, port_mask, mac, aging);
722 - ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
723 - mutex_unlock(&priv->reg_mutex);
724 -
725 - return ret;
726 -}
727 -
728 -static int
729 -qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
730 -{
731 - int ret;
732 -
733 - mutex_lock(&priv->reg_mutex);
734 - qca8k_fdb_write(priv, vid, port_mask, mac, 0);
735 - ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
736 - mutex_unlock(&priv->reg_mutex);
737 -
738 - return ret;
739 -}
740 -
741 -static void
742 -qca8k_fdb_flush(struct qca8k_priv *priv)
743 -{
744 - mutex_lock(&priv->reg_mutex);
745 - qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
746 - mutex_unlock(&priv->reg_mutex);
747 -}
748 -
749 -static int
750 -qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
751 - const u8 *mac, u16 vid)
752 -{
753 - struct qca8k_fdb fdb = { 0 };
754 - int ret;
755 -
756 - mutex_lock(&priv->reg_mutex);
757 -
758 - qca8k_fdb_write(priv, vid, 0, mac, 0);
759 - ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
760 - if (ret < 0)
761 - goto exit;
762 -
763 - ret = qca8k_fdb_read(priv, &fdb);
764 - if (ret < 0)
765 - goto exit;
766 -
767 - /* Rule exist. Delete first */
768 - if (!fdb.aging) {
769 - ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
770 - if (ret)
771 - goto exit;
772 - }
773 -
774 - /* Add port to fdb portmask */
775 - fdb.port_mask |= port_mask;
776 -
777 - qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
778 - ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
779 -
780 -exit:
781 - mutex_unlock(&priv->reg_mutex);
782 - return ret;
783 -}
784 -
785 -static int
786 -qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
787 - const u8 *mac, u16 vid)
788 -{
789 - struct qca8k_fdb fdb = { 0 };
790 - int ret;
791 -
792 - mutex_lock(&priv->reg_mutex);
793 -
794 - qca8k_fdb_write(priv, vid, 0, mac, 0);
795 - ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
796 - if (ret < 0)
797 - goto exit;
798 -
799 - /* Rule doesn't exist. Why delete? */
800 - if (!fdb.aging) {
801 - ret = -EINVAL;
802 - goto exit;
803 - }
804 -
805 - ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
806 - if (ret)
807 - goto exit;
808 -
809 - /* Only port in the rule is this port. Don't re insert */
810 - if (fdb.port_mask == port_mask)
811 - goto exit;
812 -
813 - /* Remove port from port mask */
814 - fdb.port_mask &= ~port_mask;
815 -
816 - qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
817 - ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
818 -
819 -exit:
820 - mutex_unlock(&priv->reg_mutex);
821 - return ret;
822 -}
823 -
824 -static int
825 -qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
826 -{
827 - u32 reg;
828 - int ret;
829 -
830 - /* Set the command and VLAN index */
831 - reg = QCA8K_VTU_FUNC1_BUSY;
832 - reg |= cmd;
833 - reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
834 -
835 - /* Write the function register triggering the table access */
836 - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
837 - if (ret)
838 - return ret;
839 -
840 - /* wait for completion */
841 - ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
842 - if (ret)
843 - return ret;
844 -
845 - /* Check for table full violation when adding an entry */
846 - if (cmd == QCA8K_VLAN_LOAD) {
847 - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
848 - if (ret < 0)
849 - return ret;
850 - if (reg & QCA8K_VTU_FUNC1_FULL)
851 - return -ENOMEM;
852 - }
853 -
854 - return 0;
855 -}
856 -
857 -static int
858 -qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
859 -{
860 - u32 reg;
861 - int ret;
862 -
863 - /*
864 - We do the right thing with VLAN 0 and treat it as untagged while
865 - preserving the tag on egress.
866 - */
867 - if (vid == 0)
868 - return 0;
869 -
870 - mutex_lock(&priv->reg_mutex);
871 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
872 - if (ret < 0)
873 - goto out;
874 -
875 - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
876 - if (ret < 0)
877 - goto out;
878 - reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
879 - reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
880 - if (untagged)
881 - reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
882 - else
883 - reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
884 -
885 - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
886 - if (ret)
887 - goto out;
888 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
889 -
890 -out:
891 - mutex_unlock(&priv->reg_mutex);
892 -
893 - return ret;
894 -}
895 -
896 -static int
897 -qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
898 -{
899 - u32 reg, mask;
900 - int ret, i;
901 - bool del;
902 -
903 - mutex_lock(&priv->reg_mutex);
904 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
905 - if (ret < 0)
906 - goto out;
907 -
908 - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
909 - if (ret < 0)
910 - goto out;
911 - reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
912 - reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
913 -
914 - /* Check if we're the last member to be removed */
915 - del = true;
916 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
917 - mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
918 -
919 - if ((reg & mask) != mask) {
920 - del = false;
921 - break;
922 - }
923 - }
924 -
925 - if (del) {
926 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
927 - } else {
928 - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
929 - if (ret)
930 - goto out;
931 - ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
932 - }
933 -
934 -out:
935 - mutex_unlock(&priv->reg_mutex);
936 -
937 - return ret;
938 -}
939 -
940 -static int
941 -qca8k_mib_init(struct qca8k_priv *priv)
942 -{
943 - int ret;
944 -
945 - mutex_lock(&priv->reg_mutex);
946 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
947 - QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
948 - FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
949 - QCA8K_MIB_BUSY);
950 - if (ret)
951 - goto exit;
952 -
953 - ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
954 - if (ret)
955 - goto exit;
956 -
957 - ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
958 - if (ret)
959 - goto exit;
960 -
961 - ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
962 -
963 -exit:
964 - mutex_unlock(&priv->reg_mutex);
965 - return ret;
966 -}
967 -
968 -static void
969 -qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
970 -{
971 - u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
972 -
973 - /* Port 0 and 6 have no internal PHY */
974 - if (port > 0 && port < 6)
975 - mask |= QCA8K_PORT_STATUS_LINK_AUTO;
976 -
977 - if (enable)
978 - regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
979 - else
980 - regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
981 -}
982 -
983 -static int
984 -qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
985 - struct sk_buff *read_skb, u32 *val)
986 -{
987 - struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
988 - bool ack;
989 - int ret;
990 -
991 - reinit_completion(&mgmt_eth_data->rw_done);
992 -
993 - /* Increment seq_num and set it in the copy pkt */
994 - mgmt_eth_data->seq++;
995 - qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
996 - mgmt_eth_data->ack = false;
997 -
998 - dev_queue_xmit(skb);
999 -
1000 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1001 - QCA8K_ETHERNET_TIMEOUT);
1002 -
1003 - ack = mgmt_eth_data->ack;
1004 -
1005 - if (ret <= 0)
1006 - return -ETIMEDOUT;
1007 -
1008 - if (!ack)
1009 - return -EINVAL;
1010 -
1011 - *val = mgmt_eth_data->data[0];
1012 -
1013 - return 0;
1014 -}
1015 -
1016 -static int
1017 -qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
1018 - int regnum, u16 data)
1019 -{
1020 - struct sk_buff *write_skb, *clear_skb, *read_skb;
1021 - struct qca8k_mgmt_eth_data *mgmt_eth_data;
1022 - u32 write_val, clear_val = 0, val;
1023 - struct net_device *mgmt_master;
1024 - int ret, ret1;
1025 - bool ack;
1026 -
1027 - if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1028 - return -EINVAL;
1029 -
1030 - mgmt_eth_data = &priv->mgmt_eth_data;
1031 -
1032 - write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1033 - QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1034 - QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1035 -
1036 - if (read) {
1037 - write_val |= QCA8K_MDIO_MASTER_READ;
1038 - } else {
1039 - write_val |= QCA8K_MDIO_MASTER_WRITE;
1040 - write_val |= QCA8K_MDIO_MASTER_DATA(data);
1041 - }
1042 -
1043 - /* Prealloc all the needed skb before the lock */
1044 - write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
1045 - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
1046 - if (!write_skb)
1047 - return -ENOMEM;
1048 -
1049 - clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1050 - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1051 - if (!clear_skb) {
1052 - ret = -ENOMEM;
1053 - goto err_clear_skb;
1054 - }
1055 -
1056 - read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1057 - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1058 - if (!read_skb) {
1059 - ret = -ENOMEM;
1060 - goto err_read_skb;
1061 - }
1062 -
1063 - /* Actually start the request:
1064 - * 1. Send mdio master packet
1065 - * 2. Busy Wait for mdio master command
1066 - * 3. Get the data if we are reading
1067 - * 4. Reset the mdio master (even with error)
1068 - */
1069 - mutex_lock(&mgmt_eth_data->mutex);
1070 -
1071 - /* Check if mgmt_master is operational */
1072 - mgmt_master = priv->mgmt_master;
1073 - if (!mgmt_master) {
1074 - mutex_unlock(&mgmt_eth_data->mutex);
1075 - ret = -EINVAL;
1076 - goto err_mgmt_master;
1077 - }
1078 -
1079 - read_skb->dev = mgmt_master;
1080 - clear_skb->dev = mgmt_master;
1081 - write_skb->dev = mgmt_master;
1082 -
1083 - reinit_completion(&mgmt_eth_data->rw_done);
1084 -
1085 - /* Increment seq_num and set it in the write pkt */
1086 - mgmt_eth_data->seq++;
1087 - qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
1088 - mgmt_eth_data->ack = false;
1089 -
1090 - dev_queue_xmit(write_skb);
1091 -
1092 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1093 - QCA8K_ETHERNET_TIMEOUT);
1094 -
1095 - ack = mgmt_eth_data->ack;
1096 -
1097 - if (ret <= 0) {
1098 - ret = -ETIMEDOUT;
1099 - kfree_skb(read_skb);
1100 - goto exit;
1101 - }
1102 -
1103 - if (!ack) {
1104 - ret = -EINVAL;
1105 - kfree_skb(read_skb);
1106 - goto exit;
1107 - }
1108 -
1109 - ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
1110 - !(val & QCA8K_MDIO_MASTER_BUSY), 0,
1111 - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1112 - mgmt_eth_data, read_skb, &val);
1113 -
1114 - if (ret < 0 && ret1 < 0) {
1115 - ret = ret1;
1116 - goto exit;
1117 - }
1118 -
1119 - if (read) {
1120 - reinit_completion(&mgmt_eth_data->rw_done);
1121 -
1122 - /* Increment seq_num and set it in the read pkt */
1123 - mgmt_eth_data->seq++;
1124 - qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
1125 - mgmt_eth_data->ack = false;
1126 -
1127 - dev_queue_xmit(read_skb);
1128 -
1129 - ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1130 - QCA8K_ETHERNET_TIMEOUT);
1131 -
1132 - ack = mgmt_eth_data->ack;
1133 -
1134 - if (ret <= 0) {
1135 - ret = -ETIMEDOUT;
1136 - goto exit;
1137 - }
1138 -
1139 - if (!ack) {
1140 - ret = -EINVAL;
1141 - goto exit;
1142 - }
1143 -
1144 - ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
1145 - } else {
1146 - kfree_skb(read_skb);
1147 - }
1148 -exit:
1149 - reinit_completion(&mgmt_eth_data->rw_done);
1150 -
1151 - /* Increment seq_num and set it in the clear pkt */
1152 - mgmt_eth_data->seq++;
1153 - qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
1154 - mgmt_eth_data->ack = false;
1155 -
1156 - dev_queue_xmit(clear_skb);
1157 -
1158 - wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1159 - QCA8K_ETHERNET_TIMEOUT);
1160 -
1161 - mutex_unlock(&mgmt_eth_data->mutex);
1162 -
1163 - return ret;
1164 -
1165 - /* Error handling before lock */
1166 -err_mgmt_master:
1167 - kfree_skb(read_skb);
1168 -err_read_skb:
1169 - kfree_skb(clear_skb);
1170 -err_clear_skb:
1171 - kfree_skb(write_skb);
1172 -
1173 - return ret;
1174 -}
1175 -
1176 -static u32
1177 -qca8k_port_to_phy(int port)
1178 -{
1179 - /* From Andrew Lunn:
1180 - * Port 0 has no internal phy.
1181 - * Port 1 has an internal PHY at MDIO address 0.
1182 - * Port 2 has an internal PHY at MDIO address 1.
1183 - * ...
1184 - * Port 5 has an internal PHY at MDIO address 4.
1185 - * Port 6 has no internal PHY.
1186 - */
1187 -
1188 - return port - 1;
1189 -}
1190 -
1191 -static int
1192 -qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
1193 -{
1194 - u16 r1, r2, page;
1195 - u32 val;
1196 - int ret, ret1;
1197 -
1198 - qca8k_split_addr(reg, &r1, &r2, &page);
1199 -
1200 - ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
1201 - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1202 - bus, 0x10 | r2, r1, &val);
1203 -
1204 - /* Check if qca8k_read has failed for a different reason
1205 - * before returnting -ETIMEDOUT
1206 - */
1207 - if (ret < 0 && ret1 < 0)
1208 - return ret1;
1209 -
1210 - return ret;
1211 -}
1212 -
1213 -static int
1214 -qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
1215 -{
1216 - struct mii_bus *bus = priv->bus;
1217 - u16 r1, r2, page;
1218 - u32 val;
1219 - int ret;
1220 -
1221 - if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1222 - return -EINVAL;
1223 -
1224 - val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1225 - QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1226 - QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
1227 - QCA8K_MDIO_MASTER_DATA(data);
1228 -
1229 - qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1230 -
1231 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1232 -
1233 - ret = qca8k_set_page(priv, page);
1234 - if (ret)
1235 - goto exit;
1236 -
1237 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1238 -
1239 - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1240 - QCA8K_MDIO_MASTER_BUSY);
1241 -
1242 -exit:
1243 - /* even if the busy_wait timeouts try to clear the MASTER_EN */
1244 - qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1245 -
1246 - mutex_unlock(&bus->mdio_lock);
1247 -
1248 - return ret;
1249 -}
1250 -
1251 -static int
1252 -qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
1253 -{
1254 - struct mii_bus *bus = priv->bus;
1255 - u16 r1, r2, page;
1256 - u32 val;
1257 - int ret;
1258 -
1259 - if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1260 - return -EINVAL;
1261 -
1262 - val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1263 - QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1264 - QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1265 -
1266 - qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1267 -
1268 - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1269 -
1270 - ret = qca8k_set_page(priv, page);
1271 - if (ret)
1272 - goto exit;
1273 -
1274 - qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1275 -
1276 - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1277 - QCA8K_MDIO_MASTER_BUSY);
1278 - if (ret)
1279 - goto exit;
1280 -
1281 - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
1282 -
1283 -exit:
1284 - /* even if the busy_wait timeouts try to clear the MASTER_EN */
1285 - qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1286 -
1287 - mutex_unlock(&bus->mdio_lock);
1288 -
1289 - if (ret >= 0)
1290 - ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
1291 -
1292 - return ret;
1293 -}
1294 -
1295 -static int
1296 -qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
1297 -{
1298 - struct qca8k_priv *priv = slave_bus->priv;
1299 - int ret;
1300 -
1301 - /* Use mdio Ethernet when available, fallback to legacy one on error */
1302 - ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
1303 - if (!ret)
1304 - return 0;
1305 -
1306 - return qca8k_mdio_write(priv, phy, regnum, data);
1307 -}
1308 -
1309 -static int
1310 -qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
1311 -{
1312 - struct qca8k_priv *priv = slave_bus->priv;
1313 - int ret;
1314 -
1315 - /* Use mdio Ethernet when available, fallback to legacy one on error */
1316 - ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
1317 - if (ret >= 0)
1318 - return ret;
1319 -
1320 - ret = qca8k_mdio_read(priv, phy, regnum);
1321 -
1322 - if (ret < 0)
1323 - return 0xffff;
1324 -
1325 - return ret;
1326 -}
1327 -
1328 -static int
1329 -qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
1330 -{
1331 - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1332 -
1333 - return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
1334 -}
1335 -
1336 -static int
1337 -qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
1338 -{
1339 - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1340 -
1341 - return qca8k_internal_mdio_read(slave_bus, port, regnum);
1342 -}
1343 -
1344 -static int
1345 -qca8k_mdio_register(struct qca8k_priv *priv)
1346 -{
1347 - struct dsa_switch *ds = priv->ds;
1348 - struct device_node *mdio;
1349 - struct mii_bus *bus;
1350 -
1351 - bus = devm_mdiobus_alloc(ds->dev);
1352 - if (!bus)
1353 - return -ENOMEM;
1354 -
1355 - bus->priv = (void *)priv;
1356 - snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
1357 - ds->dst->index, ds->index);
1358 - bus->parent = ds->dev;
1359 - bus->phy_mask = ~ds->phys_mii_mask;
1360 - ds->slave_mii_bus = bus;
1361 -
1362 - /* Check if the devicetree declare the port:phy mapping */
1363 - mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
1364 - if (of_device_is_available(mdio)) {
1365 - bus->name = "qca8k slave mii";
1366 - bus->read = qca8k_internal_mdio_read;
1367 - bus->write = qca8k_internal_mdio_write;
1368 - return devm_of_mdiobus_register(priv->dev, bus, mdio);
1369 - }
1370 -
1371 - /* If a mapping can't be found the legacy mapping is used,
1372 - * using the qca8k_port_to_phy function
1373 - */
1374 - bus->name = "qca8k-legacy slave mii";
1375 - bus->read = qca8k_legacy_mdio_read;
1376 - bus->write = qca8k_legacy_mdio_write;
1377 - return devm_mdiobus_register(priv->dev, bus);
1378 -}
1379 -
1380 -static int
1381 -qca8k_setup_mdio_bus(struct qca8k_priv *priv)
1382 -{
1383 - u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
1384 - struct device_node *ports, *port;
1385 - phy_interface_t mode;
1386 - int err;
1387 -
1388 - ports = of_get_child_by_name(priv->dev->of_node, "ports");
1389 - if (!ports)
1390 - ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
1391 -
1392 - if (!ports)
1393 - return -EINVAL;
1394 -
1395 - for_each_available_child_of_node(ports, port) {
1396 - err = of_property_read_u32(port, "reg", &reg);
1397 - if (err) {
1398 - of_node_put(port);
1399 - of_node_put(ports);
1400 - return err;
1401 - }
1402 -
1403 - if (!dsa_is_user_port(priv->ds, reg))
1404 - continue;
1405 -
1406 - of_get_phy_mode(port, &mode);
1407 -
1408 - if (of_property_read_bool(port, "phy-handle") &&
1409 - mode != PHY_INTERFACE_MODE_INTERNAL)
1410 - external_mdio_mask |= BIT(reg);
1411 - else
1412 - internal_mdio_mask |= BIT(reg);
1413 - }
1414 -
1415 - of_node_put(ports);
1416 - if (!external_mdio_mask && !internal_mdio_mask) {
1417 - dev_err(priv->dev, "no PHYs are defined.\n");
1418 - return -EINVAL;
1419 - }
1420 -
1421 - /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
1422 - * the MDIO_MASTER register also _disconnects_ the external MDC
1423 - * passthrough to the internal PHYs. It's not possible to use both
1424 - * configurations at the same time!
1425 - *
1426 - * Because this came up during the review process:
1427 - * If the external mdio-bus driver is capable magically disabling
1428 - * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
1429 - * accessors for the time being, it would be possible to pull this
1430 - * off.
1431 - */
1432 - if (!!external_mdio_mask && !!internal_mdio_mask) {
1433 - dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
1434 - return -EINVAL;
1435 - }
1436 -
1437 - if (external_mdio_mask) {
1438 - /* Make sure to disable the internal mdio bus in cases
1439 - * a dt-overlay and driver reload changed the configuration
1440 - */
1441 -
1442 - return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
1443 - QCA8K_MDIO_MASTER_EN);
1444 - }
1445 -
1446 - return qca8k_mdio_register(priv);
1447 -}
1448 -
1449 -static int
1450 -qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
1451 -{
1452 - u32 mask = 0;
1453 - int ret = 0;
1454 -
1455 - /* SoC specific settings for ipq8064.
1456 - * If more device require this consider adding
1457 - * a dedicated binding.
1458 - */
1459 - if (of_machine_is_compatible("qcom,ipq8064"))
1460 - mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1461 -
1462 - /* SoC specific settings for ipq8065 */
1463 - if (of_machine_is_compatible("qcom,ipq8065"))
1464 - mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1465 -
1466 - if (mask) {
1467 - ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1468 - QCA8K_MAC_PWR_RGMII0_1_8V |
1469 - QCA8K_MAC_PWR_RGMII1_1_8V,
1470 - mask);
1471 - }
1472 -
1473 - return ret;
1474 -}
1475 -
1476 -static int qca8k_find_cpu_port(struct dsa_switch *ds)
1477 -{
1478 - struct qca8k_priv *priv = ds->priv;
1479 -
1480 - /* Find the connected cpu port. Valid port are 0 or 6 */
1481 - if (dsa_is_cpu_port(ds, 0))
1482 - return 0;
1483 -
1484 - dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
1485 -
1486 - if (dsa_is_cpu_port(ds, 6))
1487 - return 6;
1488 -
1489 - return -EINVAL;
1490 -}
1491 -
1492 -static int
1493 -qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
1494 -{
1495 - const struct qca8k_match_data *data = priv->info;
1496 - struct device_node *node = priv->dev->of_node;
1497 - u32 val = 0;
1498 - int ret;
1499 -
1500 - /* QCA8327 require to set to the correct mode.
1501 - * His bigger brother QCA8328 have the 172 pin layout.
1502 - * Should be applied by default but we set this just to make sure.
1503 - */
1504 - if (priv->switch_id == QCA8K_ID_QCA8327) {
1505 - /* Set the correct package of 148 pin for QCA8327 */
1506 - if (data->reduced_package)
1507 - val |= QCA8327_PWS_PACKAGE148_EN;
1508 -
1509 - ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
1510 - val);
1511 - if (ret)
1512 - return ret;
1513 - }
1514 -
1515 - if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
1516 - val |= QCA8K_PWS_POWER_ON_SEL;
1517 -
1518 - if (of_property_read_bool(node, "qca,led-open-drain")) {
1519 - if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
1520 - dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
1521 - return -EINVAL;
1522 - }
1523 -
1524 - val |= QCA8K_PWS_LED_OPEN_EN_CSR;
1525 - }
1526 -
1527 - return qca8k_rmw(priv, QCA8K_REG_PWS,
1528 - QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
1529 - val);
1530 -}
1531 -
1532 -static int
1533 -qca8k_parse_port_config(struct qca8k_priv *priv)
1534 -{
1535 - int port, cpu_port_index = -1, ret;
1536 - struct device_node *port_dn;
1537 - phy_interface_t mode;
1538 - struct dsa_port *dp;
1539 - u32 delay;
1540 -
1541 - /* We have 2 CPU port. Check them */
1542 - for (port = 0; port < QCA8K_NUM_PORTS; port++) {
1543 - /* Skip every other port */
1544 - if (port != 0 && port != 6)
1545 - continue;
1546 -
1547 - dp = dsa_to_port(priv->ds, port);
1548 - port_dn = dp->dn;
1549 - cpu_port_index++;
1550 -
1551 - if (!of_device_is_available(port_dn))
1552 - continue;
1553 -
1554 - ret = of_get_phy_mode(port_dn, &mode);
1555 - if (ret)
1556 - continue;
1557 -
1558 - switch (mode) {
1559 - case PHY_INTERFACE_MODE_RGMII:
1560 - case PHY_INTERFACE_MODE_RGMII_ID:
1561 - case PHY_INTERFACE_MODE_RGMII_TXID:
1562 - case PHY_INTERFACE_MODE_RGMII_RXID:
1563 - case PHY_INTERFACE_MODE_SGMII:
1564 - delay = 0;
1565 -
1566 - if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
1567 - /* Switch regs accept value in ns, convert ps to ns */
1568 - delay = delay / 1000;
1569 - else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1570 - mode == PHY_INTERFACE_MODE_RGMII_TXID)
1571 - delay = 1;
1572 -
1573 - if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
1574 - dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
1575 - delay = 3;
1576 - }
1577 -
1578 - priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
1579 -
1580 - delay = 0;
1581 -
1582 - if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
1583 - /* Switch regs accept value in ns, convert ps to ns */
1584 - delay = delay / 1000;
1585 - else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1586 - mode == PHY_INTERFACE_MODE_RGMII_RXID)
1587 - delay = 2;
1588 -
1589 - if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
1590 - dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
1591 - delay = 3;
1592 - }
1593 -
1594 - priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
1595 -
1596 - /* Skip sgmii parsing for rgmii* mode */
1597 - if (mode == PHY_INTERFACE_MODE_RGMII ||
1598 - mode == PHY_INTERFACE_MODE_RGMII_ID ||
1599 - mode == PHY_INTERFACE_MODE_RGMII_TXID ||
1600 - mode == PHY_INTERFACE_MODE_RGMII_RXID)
1601 - break;
1602 -
1603 - if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
1604 - priv->ports_config.sgmii_tx_clk_falling_edge = true;
1605 -
1606 - if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
1607 - priv->ports_config.sgmii_rx_clk_falling_edge = true;
1608 -
1609 - if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
1610 - priv->ports_config.sgmii_enable_pll = true;
1611 -
1612 - if (priv->switch_id == QCA8K_ID_QCA8327) {
1613 - dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
1614 - priv->ports_config.sgmii_enable_pll = false;
1615 - }
1616 -
1617 - if (priv->switch_revision < 2)
1618 - dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
1619 - }
1620 -
1621 - break;
1622 - default:
1623 - continue;
1624 - }
1625 - }
1626 -
1627 - return 0;
1628 -}
1629 -
1630 -static int
1631 -qca8k_setup(struct dsa_switch *ds)
1632 -{
1633 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1634 - int cpu_port, ret, i;
1635 - u32 mask;
1636 -
1637 - cpu_port = qca8k_find_cpu_port(ds);
1638 - if (cpu_port < 0) {
1639 - dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
1640 - return cpu_port;
1641 - }
1642 -
1643 - /* Parse CPU port config to be later used in phy_link mac_config */
1644 - ret = qca8k_parse_port_config(priv);
1645 - if (ret)
1646 - return ret;
1647 -
1648 - ret = qca8k_setup_mdio_bus(priv);
1649 - if (ret)
1650 - return ret;
1651 -
1652 - ret = qca8k_setup_of_pws_reg(priv);
1653 - if (ret)
1654 - return ret;
1655 -
1656 - ret = qca8k_setup_mac_pwr_sel(priv);
1657 - if (ret)
1658 - return ret;
1659 -
1660 - /* Make sure MAC06 is disabled */
1661 - ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
1662 - QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
1663 - if (ret) {
1664 - dev_err(priv->dev, "failed disabling MAC06 exchange");
1665 - return ret;
1666 - }
1667 -
1668 - /* Enable CPU Port */
1669 - ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
1670 - QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
1671 - if (ret) {
1672 - dev_err(priv->dev, "failed enabling CPU port");
1673 - return ret;
1674 - }
1675 -
1676 - /* Enable MIB counters */
1677 - ret = qca8k_mib_init(priv);
1678 - if (ret)
1679 - dev_warn(priv->dev, "mib init failed");
1680 -
1681 - /* Initial setup of all ports */
1682 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1683 - /* Disable forwarding by default on all ports */
1684 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1685 - QCA8K_PORT_LOOKUP_MEMBER, 0);
1686 - if (ret)
1687 - return ret;
1688 -
1689 - /* Enable QCA header mode on all cpu ports */
1690 - if (dsa_is_cpu_port(ds, i)) {
1691 - ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
1692 - FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
1693 - FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
1694 - if (ret) {
1695 - dev_err(priv->dev, "failed enabling QCA header mode");
1696 - return ret;
1697 - }
1698 - }
1699 -
1700 - /* Disable MAC by default on all user ports */
1701 - if (dsa_is_user_port(ds, i))
1702 - qca8k_port_set_status(priv, i, 0);
1703 - }
1704 -
1705 - /* Forward all unknown frames to CPU port for Linux processing
1706 - * Notice that in multi-cpu config only one port should be set
1707 - * for igmp, unknown, multicast and broadcast packet
1708 - */
1709 - ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
1710 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
1711 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
1712 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
1713 - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
1714 - if (ret)
1715 - return ret;
1716 -
1717 - /* Setup connection between CPU port & user ports
1718 - * Configure specific switch configuration for ports
1719 - */
1720 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1721 - /* CPU port gets connected to all user ports of the switch */
1722 - if (dsa_is_cpu_port(ds, i)) {
1723 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1724 - QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
1725 - if (ret)
1726 - return ret;
1727 - }
1728 -
1729 - /* Individual user ports get connected to CPU port only */
1730 - if (dsa_is_user_port(ds, i)) {
1731 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1732 - QCA8K_PORT_LOOKUP_MEMBER,
1733 - BIT(cpu_port));
1734 - if (ret)
1735 - return ret;
1736 -
1737 - /* Enable ARP Auto-learning by default */
1738 - ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
1739 - QCA8K_PORT_LOOKUP_LEARN);
1740 - if (ret)
1741 - return ret;
1742 -
1743 - /* For port based vlans to work we need to set the
1744 - * default egress vid
1745 - */
1746 - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
1747 - QCA8K_EGREES_VLAN_PORT_MASK(i),
1748 - QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
1749 - if (ret)
1750 - return ret;
1751 -
1752 - ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
1753 - QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
1754 - QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
1755 - if (ret)
1756 - return ret;
1757 - }
1758 -
1759 - /* The port 5 of the qca8337 have some problem in flood condition. The
1760 - * original legacy driver had some specific buffer and priority settings
1761 - * for the different port suggested by the QCA switch team. Add this
1762 - * missing settings to improve switch stability under load condition.
1763 - * This problem is limited to qca8337 and other qca8k switch are not affected.
1764 - */
1765 - if (priv->switch_id == QCA8K_ID_QCA8337) {
1766 - switch (i) {
1767 - /* The 2 CPU port and port 5 requires some different
1768 - * priority than any other ports.
1769 - */
1770 - case 0:
1771 - case 5:
1772 - case 6:
1773 - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1774 - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1775 - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
1776 - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
1777 - QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
1778 - QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
1779 - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
1780 - break;
1781 - default:
1782 - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1783 - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1784 - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
1785 - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
1786 - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
1787 - }
1788 - qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
1789 -
1790 - mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
1791 - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1792 - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1793 - QCA8K_PORT_HOL_CTRL1_WRED_EN;
1794 - qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
1795 - QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
1796 - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1797 - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1798 - QCA8K_PORT_HOL_CTRL1_WRED_EN,
1799 - mask);
1800 - }
1801 - }
1802 -
1803 - /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
1804 - if (priv->switch_id == QCA8K_ID_QCA8327) {
1805 - mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
1806 - QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
1807 - qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
1808 - QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
1809 - QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
1810 - mask);
1811 - }
1812 -
1813 - /* Setup our port MTUs to match power on defaults */
1814 - ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
1815 - if (ret)
1816 - dev_warn(priv->dev, "failed setting MTU settings");
1817 -
1818 - /* Flush the FDB table */
1819 - qca8k_fdb_flush(priv);
1820 -
1821 - /* We don't have interrupts for link changes, so we need to poll */
1822 - ds->pcs_poll = true;
1823 -
1824 - /* Set min a max ageing value supported */
1825 - ds->ageing_time_min = 7000;
1826 - ds->ageing_time_max = 458745000;
1827 -
1828 - /* Set max number of LAGs supported */
1829 - ds->num_lag_ids = QCA8K_NUM_LAGS;
1830 -
1831 - return 0;
1832 -}
1833 -
1834 -static void
1835 -qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
1836 - u32 reg)
1837 -{
1838 - u32 delay, val = 0;
1839 - int ret;
1840 -
1841 - /* Delay can be declared in 3 different way.
1842 - * Mode to rgmii and internal-delay standard binding defined
1843 - * rgmii-id or rgmii-tx/rx phy mode set.
1844 - * The parse logic set a delay different than 0 only when one
1845 - * of the 3 different way is used. In all other case delay is
1846 - * not enabled. With ID or TX/RXID delay is enabled and set
1847 - * to the default and recommended value.
1848 - */
1849 - if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1850 - delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1851 -
1852 - val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1853 - QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1854 - }
1855 -
1856 - if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1857 - delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1858 -
1859 - val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1860 - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1861 - }
1862 -
1863 - /* Set RGMII delay based on the selected values */
1864 - ret = qca8k_rmw(priv, reg,
1865 - QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1866 - QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1867 - QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1868 - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1869 - val);
1870 - if (ret)
1871 - dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1872 - cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
1873 -}
1874 -
1875 -static void
1876 -qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
1877 - const struct phylink_link_state *state)
1878 -{
1879 - struct qca8k_priv *priv = ds->priv;
1880 - int cpu_port_index, ret;
1881 - u32 reg, val;
1882 -
1883 - switch (port) {
1884 - case 0: /* 1st CPU port */
1885 - if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1886 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1887 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1888 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1889 - state->interface != PHY_INTERFACE_MODE_SGMII)
1890 - return;
1891 -
1892 - reg = QCA8K_REG_PORT0_PAD_CTRL;
1893 - cpu_port_index = QCA8K_CPU_PORT0;
1894 - break;
1895 - case 1:
1896 - case 2:
1897 - case 3:
1898 - case 4:
1899 - case 5:
1900 - /* Internal PHY, nothing to do */
1901 - return;
1902 - case 6: /* 2nd CPU port / external PHY */
1903 - if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1904 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1905 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1906 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1907 - state->interface != PHY_INTERFACE_MODE_SGMII &&
1908 - state->interface != PHY_INTERFACE_MODE_1000BASEX)
1909 - return;
1910 -
1911 - reg = QCA8K_REG_PORT6_PAD_CTRL;
1912 - cpu_port_index = QCA8K_CPU_PORT6;
1913 - break;
1914 - default:
1915 - dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
1916 - return;
1917 - }
1918 -
1919 - if (port != 6 && phylink_autoneg_inband(mode)) {
1920 - dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
1921 - __func__);
1922 - return;
1923 - }
1924 -
1925 - switch (state->interface) {
1926 - case PHY_INTERFACE_MODE_RGMII:
1927 - case PHY_INTERFACE_MODE_RGMII_ID:
1928 - case PHY_INTERFACE_MODE_RGMII_TXID:
1929 - case PHY_INTERFACE_MODE_RGMII_RXID:
1930 - qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
1931 -
1932 - /* Configure rgmii delay */
1933 - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1934 -
1935 -		/* QCA8337 requires the rgmii rx delay to be set for all ports.
1936 - * This is enabled through PORT5_PAD_CTRL for all ports,
1937 - * rather than individual port registers.
1938 - */
1939 - if (priv->switch_id == QCA8K_ID_QCA8337)
1940 - qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
1941 - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
1942 - break;
1943 - case PHY_INTERFACE_MODE_SGMII:
1944 - case PHY_INTERFACE_MODE_1000BASEX:
1945 - /* Enable SGMII on the port */
1946 - qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
1947 -
1948 - /* Enable/disable SerDes auto-negotiation as necessary */
1949 - ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1950 - if (ret)
1951 - return;
1952 - if (phylink_autoneg_inband(mode))
1953 - val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1954 - else
1955 - val |= QCA8K_PWS_SERDES_AEN_DIS;
1956 - qca8k_write(priv, QCA8K_REG_PWS, val);
1957 -
1958 - /* Configure the SGMII parameters */
1959 - ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1960 - if (ret)
1961 - return;
1962 -
1963 - val |= QCA8K_SGMII_EN_SD;
1964 -
1965 - if (priv->ports_config.sgmii_enable_pll)
1966 - val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1967 - QCA8K_SGMII_EN_TX;
1968 -
1969 - if (dsa_is_cpu_port(ds, port)) {
1970 - /* CPU port, we're talking to the CPU MAC, be a PHY */
1971 - val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1972 - val |= QCA8K_SGMII_MODE_CTRL_PHY;
1973 - } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
1974 - val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1975 - val |= QCA8K_SGMII_MODE_CTRL_MAC;
1976 - } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
1977 - val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1978 - val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1979 - }
1980 -
1981 - qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1982 -
1983 -		/* The original code reports port instability, as SGMII also
1984 -		 * requires the delay to be set. Apply the advised values here or take them from DT.
1985 - */
1986 - if (state->interface == PHY_INTERFACE_MODE_SGMII)
1987 - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1988 -
1989 -		/* For qca8327/qca8328/qca8334/qca8338 the sgmii config is unique and
1990 -		 * the falling edge is set by writing to the PORT0 PAD reg
1991 - */
1992 - if (priv->switch_id == QCA8K_ID_QCA8327 ||
1993 - priv->switch_id == QCA8K_ID_QCA8337)
1994 - reg = QCA8K_REG_PORT0_PAD_CTRL;
1995 -
1996 - val = 0;
1997 -
1998 - /* SGMII Clock phase configuration */
1999 - if (priv->ports_config.sgmii_rx_clk_falling_edge)
2000 - val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
2001 -
2002 - if (priv->ports_config.sgmii_tx_clk_falling_edge)
2003 - val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
2004 -
2005 - if (val)
2006 - ret = qca8k_rmw(priv, reg,
2007 - QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
2008 - QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
2009 - val);
2010 -
2011 - break;
2012 - default:
2013 - dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
2014 - phy_modes(state->interface), port);
2015 - return;
2016 - }
2017 -}
2018 -
2019 -static void
2020 -qca8k_phylink_validate(struct dsa_switch *ds, int port,
2021 - unsigned long *supported,
2022 - struct phylink_link_state *state)
2023 -{
2024 - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2025 -
2026 - switch (port) {
2027 - case 0: /* 1st CPU port */
2028 - if (state->interface != PHY_INTERFACE_MODE_NA &&
2029 - state->interface != PHY_INTERFACE_MODE_RGMII &&
2030 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
2031 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
2032 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
2033 - state->interface != PHY_INTERFACE_MODE_SGMII)
2034 - goto unsupported;
2035 - break;
2036 - case 1:
2037 - case 2:
2038 - case 3:
2039 - case 4:
2040 - case 5:
2041 - /* Internal PHY */
2042 - if (state->interface != PHY_INTERFACE_MODE_NA &&
2043 - state->interface != PHY_INTERFACE_MODE_GMII &&
2044 - state->interface != PHY_INTERFACE_MODE_INTERNAL)
2045 - goto unsupported;
2046 - break;
2047 - case 6: /* 2nd CPU port / external PHY */
2048 - if (state->interface != PHY_INTERFACE_MODE_NA &&
2049 - state->interface != PHY_INTERFACE_MODE_RGMII &&
2050 - state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
2051 - state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
2052 - state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
2053 - state->interface != PHY_INTERFACE_MODE_SGMII &&
2054 - state->interface != PHY_INTERFACE_MODE_1000BASEX)
2055 - goto unsupported;
2056 - break;
2057 - default:
2058 -unsupported:
2059 - linkmode_zero(supported);
2060 - return;
2061 - }
2062 -
2063 - phylink_set_port_modes(mask);
2064 - phylink_set(mask, Autoneg);
2065 -
2066 - phylink_set(mask, 1000baseT_Full);
2067 - phylink_set(mask, 10baseT_Half);
2068 - phylink_set(mask, 10baseT_Full);
2069 - phylink_set(mask, 100baseT_Half);
2070 - phylink_set(mask, 100baseT_Full);
2071 -
2072 - if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
2073 - phylink_set(mask, 1000baseX_Full);
2074 -
2075 - phylink_set(mask, Pause);
2076 - phylink_set(mask, Asym_Pause);
2077 -
2078 - linkmode_and(supported, supported, mask);
2079 - linkmode_and(state->advertising, state->advertising, mask);
2080 -}
2081 -
2082 -static int
2083 -qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
2084 - struct phylink_link_state *state)
2085 -{
2086 - struct qca8k_priv *priv = ds->priv;
2087 - u32 reg;
2088 - int ret;
2089 -
2090 - ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
2091 - if (ret < 0)
2092 - return ret;
2093 -
2094 - state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
2095 - state->an_complete = state->link;
2096 - state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
2097 - state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
2098 - DUPLEX_HALF;
2099 -
2100 - switch (reg & QCA8K_PORT_STATUS_SPEED) {
2101 - case QCA8K_PORT_STATUS_SPEED_10:
2102 - state->speed = SPEED_10;
2103 - break;
2104 - case QCA8K_PORT_STATUS_SPEED_100:
2105 - state->speed = SPEED_100;
2106 - break;
2107 - case QCA8K_PORT_STATUS_SPEED_1000:
2108 - state->speed = SPEED_1000;
2109 - break;
2110 - default:
2111 - state->speed = SPEED_UNKNOWN;
2112 - break;
2113 - }
2114 -
2115 - state->pause = MLO_PAUSE_NONE;
2116 - if (reg & QCA8K_PORT_STATUS_RXFLOW)
2117 - state->pause |= MLO_PAUSE_RX;
2118 - if (reg & QCA8K_PORT_STATUS_TXFLOW)
2119 - state->pause |= MLO_PAUSE_TX;
2120 -
2121 - return 1;
2122 -}
2123 -
2124 -static void
2125 -qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
2126 - phy_interface_t interface)
2127 -{
2128 - struct qca8k_priv *priv = ds->priv;
2129 -
2130 - qca8k_port_set_status(priv, port, 0);
2131 -}
2132 -
2133 -static void
2134 -qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
2135 - phy_interface_t interface, struct phy_device *phydev,
2136 - int speed, int duplex, bool tx_pause, bool rx_pause)
2137 -{
2138 - struct qca8k_priv *priv = ds->priv;
2139 - u32 reg;
2140 -
2141 - if (phylink_autoneg_inband(mode)) {
2142 - reg = QCA8K_PORT_STATUS_LINK_AUTO;
2143 - } else {
2144 - switch (speed) {
2145 - case SPEED_10:
2146 - reg = QCA8K_PORT_STATUS_SPEED_10;
2147 - break;
2148 - case SPEED_100:
2149 - reg = QCA8K_PORT_STATUS_SPEED_100;
2150 - break;
2151 - case SPEED_1000:
2152 - reg = QCA8K_PORT_STATUS_SPEED_1000;
2153 - break;
2154 - default:
2155 - reg = QCA8K_PORT_STATUS_LINK_AUTO;
2156 - break;
2157 - }
2158 -
2159 - if (duplex == DUPLEX_FULL)
2160 - reg |= QCA8K_PORT_STATUS_DUPLEX;
2161 -
2162 - if (rx_pause || dsa_is_cpu_port(ds, port))
2163 - reg |= QCA8K_PORT_STATUS_RXFLOW;
2164 -
2165 - if (tx_pause || dsa_is_cpu_port(ds, port))
2166 - reg |= QCA8K_PORT_STATUS_TXFLOW;
2167 - }
2168 -
2169 - reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
2170 -
2171 - qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
2172 -}
2173 -
2174 -static void
2175 -qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
2176 -{
2177 - struct qca8k_priv *priv = ds->priv;
2178 - int i;
2179 -
2180 - if (stringset != ETH_SS_STATS)
2181 - return;
2182 -
2183 - for (i = 0; i < priv->info->mib_count; i++)
2184 - strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
2185 - ETH_GSTRING_LEN);
2186 -}
2187 -
2188 -static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
2189 -{
2190 - struct qca8k_mib_eth_data *mib_eth_data;
2191 - struct qca8k_priv *priv = ds->priv;
2192 - const struct qca8k_mib_desc *mib;
2193 - struct mib_ethhdr *mib_ethhdr;
2194 - int i, mib_len, offset = 0;
2195 - u64 *data;
2196 - u8 port;
2197 -
2198 - mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
2199 - mib_eth_data = &priv->mib_eth_data;
2200 -
2201 -	/* The switch autocasts MIB data for every port. Ignore packets for
2202 -	 * other ports and parse only the requested one.
2203 - */
2204 - port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
2205 - if (port != mib_eth_data->req_port)
2206 - goto exit;
2207 -
2208 - data = mib_eth_data->data;
2209 -
2210 - for (i = 0; i < priv->info->mib_count; i++) {
2211 - mib = &ar8327_mib[i];
2212 -
2213 -		/* The first 3 MIB counters are present in the skb head */
2214 - if (i < 3) {
2215 - data[i] = mib_ethhdr->data[i];
2216 - continue;
2217 - }
2218 -
2219 - mib_len = sizeof(uint32_t);
2220 -
2221 -		/* Some MIB counters are 64 bits wide */
2222 - if (mib->size == 2)
2223 - mib_len = sizeof(uint64_t);
2224 -
2225 -		/* Copy the MIB value from the packet into the data array */
2226 - memcpy(data + i, skb->data + offset, mib_len);
2227 -
2228 - /* Set the offset for the next mib */
2229 - offset += mib_len;
2230 - }
2231 -
2232 -exit:
2233 -	/* Complete once all the MIB packets have been received */
2234 - if (refcount_dec_and_test(&mib_eth_data->port_parsed))
2235 - complete(&mib_eth_data->rw_done);
2236 -}
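
The autocast handler above pulls the first three counters from the tag header and then walks the rest of the payload back to back, advancing by 4 or 8 bytes per counter. A minimal standalone sketch of that offset walk follows; the size table here is purely illustrative (the real sizes come from the ar8327_mib descriptors):

#include <stdio.h>

int main(void)
{
	/* 1 = 32-bit counter, 2 = 64-bit counter, mirroring mib->size;
	 * these particular entries are illustrative only.
	 */
	const int size[] = { 1, 1, 1, 1, 2, 2, 1 };
	const int count = sizeof(size) / sizeof(size[0]);
	int i, offset = 0;

	for (i = 3; i < count; i++) {	/* first 3 come from the skb head */
		int len = size[i] == 2 ? 8 : 4;

		printf("mib %d: payload offset %d, %d bytes\n", i, offset, len);
		offset += len;
	}
	return 0;
}
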
2237 -
2238 -static int
2239 -qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
2240 -{
2241 - struct dsa_port *dp = dsa_to_port(ds, port);
2242 - struct qca8k_mib_eth_data *mib_eth_data;
2243 - struct qca8k_priv *priv = ds->priv;
2244 - int ret;
2245 -
2246 - mib_eth_data = &priv->mib_eth_data;
2247 -
2248 - mutex_lock(&mib_eth_data->mutex);
2249 -
2250 - reinit_completion(&mib_eth_data->rw_done);
2251 -
2252 - mib_eth_data->req_port = dp->index;
2253 - mib_eth_data->data = data;
2254 - refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
2255 -
2256 - mutex_lock(&priv->reg_mutex);
2257 -
2258 - /* Send mib autocast request */
2259 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
2260 - QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
2261 - FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
2262 - QCA8K_MIB_BUSY);
2263 -
2264 - mutex_unlock(&priv->reg_mutex);
2265 -
2266 - if (ret)
2267 - goto exit;
2268 -
2269 - ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
2270 -
2271 -exit:
2272 - mutex_unlock(&mib_eth_data->mutex);
2273 -
2274 - return ret;
2275 -}
2276 -
2277 -static void
2278 -qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
2279 - uint64_t *data)
2280 -{
2281 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2282 - const struct qca8k_mib_desc *mib;
2283 - u32 reg, i, val;
2284 - u32 hi = 0;
2285 - int ret;
2286 -
2287 - if (priv->mgmt_master && priv->info->ops->autocast_mib &&
2288 - priv->info->ops->autocast_mib(ds, port, data) > 0)
2289 - return;
2290 -
2291 - for (i = 0; i < priv->info->mib_count; i++) {
2292 - mib = &ar8327_mib[i];
2293 - reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
2294 -
2295 - ret = qca8k_read(priv, reg, &val);
2296 - if (ret < 0)
2297 - continue;
2298 -
2299 - if (mib->size == 2) {
2300 - ret = qca8k_read(priv, reg + 4, &hi);
2301 - if (ret < 0)
2302 - continue;
2303 - }
2304 -
2305 - data[i] = val;
2306 - if (mib->size == 2)
2307 - data[i] |= (u64)hi << 32;
2308 - }
2309 -}
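
For 64-bit counters the MDIO path above issues a second read at offset + 4 and merges the two halves. A standalone sketch of that merge, with arbitrary example words:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0xdeadbeef;	/* word read at mib->offset     */
	uint32_t hi = 0x00000001;	/* word read at mib->offset + 4 */
	uint64_t counter = lo | ((uint64_t)hi << 32);

	printf("counter = 0x%016llx\n", (unsigned long long)counter);
	return 0;
}
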
2310 -
2311 -static int
2312 -qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
2313 -{
2314 - struct qca8k_priv *priv = ds->priv;
2315 -
2316 - if (sset != ETH_SS_STATS)
2317 - return 0;
2318 -
2319 - return priv->info->mib_count;
2320 -}
2321 -
2322 -static int
2323 -qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
2324 -{
2325 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2326 - u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
2327 - u32 reg;
2328 - int ret;
2329 -
2330 - mutex_lock(&priv->reg_mutex);
2331 - ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
2332 - if (ret < 0)
2333 - goto exit;
2334 -
2335 - if (eee->eee_enabled)
2336 - reg |= lpi_en;
2337 - else
2338 - reg &= ~lpi_en;
2339 - ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
2340 -
2341 -exit:
2342 - mutex_unlock(&priv->reg_mutex);
2343 - return ret;
2344 -}
2345 -
2346 -static int
2347 -qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2348 -{
2349 - /* Nothing to do on the port's MAC */
2350 - return 0;
2351 -}
2352 -
2353 -static void
2354 -qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
2355 -{
2356 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2357 - u32 stp_state;
2358 -
2359 - switch (state) {
2360 - case BR_STATE_DISABLED:
2361 - stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
2362 - break;
2363 - case BR_STATE_BLOCKING:
2364 - stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
2365 - break;
2366 - case BR_STATE_LISTENING:
2367 - stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
2368 - break;
2369 - case BR_STATE_LEARNING:
2370 - stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
2371 - break;
2372 - case BR_STATE_FORWARDING:
2373 - default:
2374 - stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
2375 - break;
2376 - }
2377 -
2378 - qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2379 - QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
2380 -}
2381 -
2382 -static int
2383 -qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
2384 -{
2385 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2386 - int port_mask, cpu_port;
2387 - int i, ret;
2388 -
2389 - cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2390 - port_mask = BIT(cpu_port);
2391 -
2392 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2393 - if (dsa_is_cpu_port(ds, i))
2394 - continue;
2395 - if (dsa_to_port(ds, i)->bridge_dev != br)
2396 - continue;
2397 - /* Add this port to the portvlan mask of the other ports
2398 - * in the bridge
2399 - */
2400 - ret = regmap_set_bits(priv->regmap,
2401 - QCA8K_PORT_LOOKUP_CTRL(i),
2402 - BIT(port));
2403 - if (ret)
2404 - return ret;
2405 - if (i != port)
2406 - port_mask |= BIT(i);
2407 - }
2408 -
2409 -	/* Add all other ports to this port's portvlan mask */
2410 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2411 - QCA8K_PORT_LOOKUP_MEMBER, port_mask);
2412 -
2413 - return ret;
2414 -}
2415 -
2416 -static void
2417 -qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
2418 -{
2419 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2420 - int cpu_port, i;
2421 -
2422 - cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2423 -
2424 - for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2425 - if (dsa_is_cpu_port(ds, i))
2426 - continue;
2427 - if (dsa_to_port(ds, i)->bridge_dev != br)
2428 - continue;
2429 -		/* Remove this port from the portvlan mask of the other ports
2430 - * in the bridge
2431 - */
2432 - regmap_clear_bits(priv->regmap,
2433 - QCA8K_PORT_LOOKUP_CTRL(i),
2434 - BIT(port));
2435 - }
2436 -
2437 - /* Set the cpu port to be the only one in the portvlan mask of
2438 - * this port
2439 - */
2440 - qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2441 - QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
2442 -}
2443 -
2444 -static void
2445 -qca8k_port_fast_age(struct dsa_switch *ds, int port)
2446 -{
2447 - struct qca8k_priv *priv = ds->priv;
2448 -
2449 - mutex_lock(&priv->reg_mutex);
2450 - qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
2451 - mutex_unlock(&priv->reg_mutex);
2452 -}
2453 -
2454 -static int
2455 -qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
2456 -{
2457 - struct qca8k_priv *priv = ds->priv;
2458 - unsigned int secs = msecs / 1000;
2459 - u32 val;
2460 -
2461 - /* AGE_TIME reg is set in 7s step */
2462 - val = secs / 7;
2463 -
2464 - /* Handle case with 0 as val to NOT disable
2465 - * learning
2466 - */
2467 - if (!val)
2468 - val = 1;
2469 -
2470 - return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
2471 - QCA8K_ATU_AGE_TIME(val));
2472 -}
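
The ageing conversion above maps milliseconds to 7-second register steps and never writes 0, since that would disable learning. A standalone sketch of the same arithmetic (the advertised min/max of 7000 ms and 458745000 ms map to 1 and 65535):

#include <stdio.h>

static unsigned int ageing_msecs_to_reg(unsigned int msecs)
{
	unsigned int val = (msecs / 1000) / 7;	/* register is in 7-second steps */

	return val ? val : 1;	/* a value of 0 would disable learning */
}

int main(void)
{
	printf("7000 ms      -> %u\n", ageing_msecs_to_reg(7000));
	printf("458745000 ms -> %u\n", ageing_msecs_to_reg(458745000));
	return 0;
}
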
2473 -
2474 -static int
2475 -qca8k_port_enable(struct dsa_switch *ds, int port,
2476 - struct phy_device *phy)
2477 -{
2478 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2479 -
2480 - qca8k_port_set_status(priv, port, 1);
2481 - priv->port_enabled_map |= BIT(port);
2482 -
2483 - if (dsa_is_user_port(ds, port))
2484 - phy_support_asym_pause(phy);
2485 -
2486 - return 0;
2487 -}
2488 -
2489 -static void
2490 -qca8k_port_disable(struct dsa_switch *ds, int port)
2491 -{
2492 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2493 -
2494 - qca8k_port_set_status(priv, port, 0);
2495 - priv->port_enabled_map &= ~BIT(port);
2496 -}
2497 -
2498 -static int
2499 -qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
2500 -{
2501 - struct qca8k_priv *priv = ds->priv;
2502 - int ret;
2503 -
2504 -	/* We only have a general MTU setting.
2505 -	 * DSA always sets the CPU port's MTU to the largest MTU of the slave
2506 -	 * ports.
2507 -	 * Setting the MTU just for the CPU port is sufficient to correctly set a
2508 - * value for every port.
2509 - */
2510 - if (!dsa_is_cpu_port(ds, port))
2511 - return 0;
2512 -
2513 - /* To change the MAX_FRAME_SIZE the cpu ports must be off or
2514 - * the switch panics.
2515 - * Turn off both cpu ports before applying the new value to prevent
2516 - * this.
2517 - */
2518 - if (priv->port_enabled_map & BIT(0))
2519 - qca8k_port_set_status(priv, 0, 0);
2520 -
2521 - if (priv->port_enabled_map & BIT(6))
2522 - qca8k_port_set_status(priv, 6, 0);
2523 -
2524 - /* Include L2 header / FCS length */
2525 - ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
2526 -
2527 - if (priv->port_enabled_map & BIT(0))
2528 - qca8k_port_set_status(priv, 0, 1);
2529 -
2530 - if (priv->port_enabled_map & BIT(6))
2531 - qca8k_port_set_status(priv, 6, 1);
2532 -
2533 - return ret;
2534 -}
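
The MTU change above is applied as a single MAX_FRAME_SIZE write that accounts for the L2 header and FCS. A trivial sketch of that frame-size computation (ETH_HLEN is 14 and ETH_FCS_LEN is 4, as defined by the standard Ethernet framing):

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header */
#define ETH_FCS_LEN	4	/* frame checksum  */

int main(void)
{
	int new_mtu = 1500;	/* example MTU */

	printf("MAX_FRAME_SIZE = %d\n", new_mtu + ETH_HLEN + ETH_FCS_LEN);
	return 0;
}
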
2535 -
2536 -static int
2537 -qca8k_port_max_mtu(struct dsa_switch *ds, int port)
2538 -{
2539 - return QCA8K_MAX_MTU;
2540 -}
2541 -
2542 -static int
2543 -qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
2544 - u16 port_mask, u16 vid)
2545 -{
2546 - /* Set the vid to the port vlan id if no vid is set */
2547 - if (!vid)
2548 - vid = QCA8K_PORT_VID_DEF;
2549 -
2550 - return qca8k_fdb_add(priv, addr, port_mask, vid,
2551 - QCA8K_ATU_STATUS_STATIC);
2552 -}
2553 -
2554 -static int
2555 -qca8k_port_fdb_add(struct dsa_switch *ds, int port,
2556 - const unsigned char *addr, u16 vid)
2557 -{
2558 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2559 - u16 port_mask = BIT(port);
2560 -
2561 - return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
2562 -}
2563 -
2564 -static int
2565 -qca8k_port_fdb_del(struct dsa_switch *ds, int port,
2566 - const unsigned char *addr, u16 vid)
2567 -{
2568 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2569 - u16 port_mask = BIT(port);
2570 -
2571 - if (!vid)
2572 - vid = QCA8K_PORT_VID_DEF;
2573 -
2574 - return qca8k_fdb_del(priv, addr, port_mask, vid);
2575 -}
2576 -
2577 -static int
2578 -qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
2579 - dsa_fdb_dump_cb_t *cb, void *data)
2580 -{
2581 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2582 - struct qca8k_fdb _fdb = { 0 };
2583 - int cnt = QCA8K_NUM_FDB_RECORDS;
2584 - bool is_static;
2585 - int ret = 0;
2586 -
2587 - mutex_lock(&priv->reg_mutex);
2588 - while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
2589 - if (!_fdb.aging)
2590 - break;
2591 - is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
2592 - ret = cb(_fdb.mac, _fdb.vid, is_static, data);
2593 - if (ret)
2594 - break;
2595 - }
2596 - mutex_unlock(&priv->reg_mutex);
2597 -
2598 - return 0;
2599 -}
2600 -
2601 -static int
2602 -qca8k_port_mdb_add(struct dsa_switch *ds, int port,
2603 - const struct switchdev_obj_port_mdb *mdb)
2604 -{
2605 - struct qca8k_priv *priv = ds->priv;
2606 - const u8 *addr = mdb->addr;
2607 - u16 vid = mdb->vid;
2608 -
2609 - return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
2610 -}
2611 -
2612 -static int
2613 -qca8k_port_mdb_del(struct dsa_switch *ds, int port,
2614 - const struct switchdev_obj_port_mdb *mdb)
2615 -{
2616 - struct qca8k_priv *priv = ds->priv;
2617 - const u8 *addr = mdb->addr;
2618 - u16 vid = mdb->vid;
2619 -
2620 - return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
2621 -}
2622 -
2623 -static int
2624 -qca8k_port_mirror_add(struct dsa_switch *ds, int port,
2625 - struct dsa_mall_mirror_tc_entry *mirror,
2626 - bool ingress)
2627 -{
2628 - struct qca8k_priv *priv = ds->priv;
2629 - int monitor_port, ret;
2630 - u32 reg, val;
2631 -
2632 -	/* Check for an existing entry */
2633 - if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
2634 - return -EEXIST;
2635 -
2636 - ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
2637 - if (ret)
2638 - return ret;
2639 -
2640 - /* QCA83xx can have only one port set to mirror mode.
2641 - * Check that the correct port is requested and return error otherwise.
2642 -	 * When no mirror port is set, the value is set to 0xF
2643 - */
2644 - monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2645 - if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
2646 - return -EEXIST;
2647 -
2648 - /* Set the monitor port */
2649 - val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
2650 - mirror->to_local_port);
2651 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2652 - QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2653 - if (ret)
2654 - return ret;
2655 -
2656 - if (ingress) {
2657 - reg = QCA8K_PORT_LOOKUP_CTRL(port);
2658 - val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2659 - } else {
2660 - reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2661 - val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2662 - }
2663 -
2664 - ret = regmap_update_bits(priv->regmap, reg, val, val);
2665 - if (ret)
2666 - return ret;
2667 -
2668 - /* Track mirror port for tx and rx to decide when the
2669 - * mirror port has to be disabled.
2670 - */
2671 - if (ingress)
2672 - priv->mirror_rx |= BIT(port);
2673 - else
2674 - priv->mirror_tx |= BIT(port);
2675 -
2676 - return 0;
2677 -}
2678 -
2679 -static void
2680 -qca8k_port_mirror_del(struct dsa_switch *ds, int port,
2681 - struct dsa_mall_mirror_tc_entry *mirror)
2682 -{
2683 - struct qca8k_priv *priv = ds->priv;
2684 - u32 reg, val;
2685 - int ret;
2686 -
2687 - if (mirror->ingress) {
2688 - reg = QCA8K_PORT_LOOKUP_CTRL(port);
2689 - val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2690 - } else {
2691 - reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2692 - val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2693 - }
2694 -
2695 - ret = regmap_clear_bits(priv->regmap, reg, val);
2696 - if (ret)
2697 - goto err;
2698 -
2699 - if (mirror->ingress)
2700 - priv->mirror_rx &= ~BIT(port);
2701 - else
2702 - priv->mirror_tx &= ~BIT(port);
2703 -
2704 - /* No port set to send packet to mirror port. Disable mirror port */
2705 - if (!priv->mirror_rx && !priv->mirror_tx) {
2706 - val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
2707 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2708 - QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2709 - if (ret)
2710 - goto err;
2711 - }
2712 -err:
2713 - dev_err(priv->dev, "Failed to del mirror port from %d", port);
2714 -}
2715 -
2716 -static int
2717 -qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
2718 - struct netlink_ext_ack *extack)
2719 -{
2720 - struct qca8k_priv *priv = ds->priv;
2721 - int ret;
2722 -
2723 - if (vlan_filtering) {
2724 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2725 - QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2726 - QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
2727 - } else {
2728 - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2729 - QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2730 - QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
2731 - }
2732 -
2733 - return ret;
2734 -}
2735 -
2736 -static int
2737 -qca8k_port_vlan_add(struct dsa_switch *ds, int port,
2738 - const struct switchdev_obj_port_vlan *vlan,
2739 - struct netlink_ext_ack *extack)
2740 -{
2741 - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2742 - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2743 - struct qca8k_priv *priv = ds->priv;
2744 - int ret;
2745 -
2746 - ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
2747 - if (ret) {
2748 - dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
2749 - return ret;
2750 - }
2751 -
2752 - if (pvid) {
2753 - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
2754 - QCA8K_EGREES_VLAN_PORT_MASK(port),
2755 - QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
2756 - if (ret)
2757 - return ret;
2758 -
2759 - ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
2760 - QCA8K_PORT_VLAN_CVID(vlan->vid) |
2761 - QCA8K_PORT_VLAN_SVID(vlan->vid));
2762 - }
2763 -
2764 - return ret;
2765 -}
2766 -
2767 -static int
2768 -qca8k_port_vlan_del(struct dsa_switch *ds, int port,
2769 - const struct switchdev_obj_port_vlan *vlan)
2770 -{
2771 - struct qca8k_priv *priv = ds->priv;
2772 - int ret;
2773 -
2774 - ret = qca8k_vlan_del(priv, port, vlan->vid);
2775 - if (ret)
2776 - dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
2777 -
2778 - return ret;
2779 -}
2780 -
2781 -static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
2782 -{
2783 - struct qca8k_priv *priv = ds->priv;
2784 -
2785 -	/* Communicate the switch revision to the internal PHY driver.
2786 -	 * Based on the switch revision, different values need to be
2787 -	 * set in the dbg and mmd regs of the PHY.
2788 -	 * The first 2 bits are used to communicate the switch revision
2789 -	 * to the PHY driver.
2790 - */
2791 - if (port > 0 && port < 6)
2792 - return priv->switch_revision;
2793 -
2794 - return 0;
2795 -}
2796 -
2797 -static enum dsa_tag_protocol
2798 -qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
2799 - enum dsa_tag_protocol mp)
2800 -{
2801 - return DSA_TAG_PROTO_QCA;
2802 -}
2803 -
2804 -static bool
2805 -qca8k_lag_can_offload(struct dsa_switch *ds,
2806 - struct net_device *lag,
2807 - struct netdev_lag_upper_info *info)
2808 -{
2809 - struct dsa_port *dp;
2810 - int id, members = 0;
2811 -
2812 - id = dsa_lag_id(ds->dst, lag);
2813 - if (id < 0 || id >= ds->num_lag_ids)
2814 - return false;
2815 -
2816 - dsa_lag_foreach_port(dp, ds->dst, lag)
2817 - /* Includes the port joining the LAG */
2818 - members++;
2819 -
2820 - if (members > QCA8K_NUM_PORTS_FOR_LAG)
2821 - return false;
2822 -
2823 - if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2824 - return false;
2825 -
2826 - if (info->hash_type != NETDEV_LAG_HASH_L2 &&
2827 - info->hash_type != NETDEV_LAG_HASH_L23)
2828 - return false;
2829 -
2830 - return true;
2831 -}
2832 -
2833 -static int
2834 -qca8k_lag_setup_hash(struct dsa_switch *ds,
2835 - struct net_device *lag,
2836 - struct netdev_lag_upper_info *info)
2837 -{
2838 - struct qca8k_priv *priv = ds->priv;
2839 - bool unique_lag = true;
2840 - u32 hash = 0;
2841 - int i, id;
2842 -
2843 - id = dsa_lag_id(ds->dst, lag);
2844 -
2845 - switch (info->hash_type) {
2846 - case NETDEV_LAG_HASH_L23:
2847 - hash |= QCA8K_TRUNK_HASH_SIP_EN;
2848 - hash |= QCA8K_TRUNK_HASH_DIP_EN;
2849 - fallthrough;
2850 - case NETDEV_LAG_HASH_L2:
2851 - hash |= QCA8K_TRUNK_HASH_SA_EN;
2852 - hash |= QCA8K_TRUNK_HASH_DA_EN;
2853 - break;
2854 - default: /* We should NEVER reach this */
2855 - return -EOPNOTSUPP;
2856 - }
2857 -
2858 -	/* Check if we are the only configured LAG */
2859 - dsa_lags_foreach_id(i, ds->dst)
2860 - if (i != id && dsa_lag_dev(ds->dst, i)) {
2861 - unique_lag = false;
2862 - break;
2863 - }
2864 -
2865 - /* Hash Mode is global. Make sure the same Hash Mode
2866 -	 * is set for all the 4 possible LAGs.
2867 -	 * If we are the only configured LAG we can set whatever hash
2868 -	 * mode we want.
2869 -	 * To change the hash mode, all LAGs need to be removed
2870 -	 * and the mode changed with the last one created.
2871 - */
2872 - if (unique_lag) {
2873 - priv->lag_hash_mode = hash;
2874 - } else if (priv->lag_hash_mode != hash) {
2875 -		netdev_err(lag, "Error: Mismatched Hash Mode across different lags is not supported\n");
2876 - return -EOPNOTSUPP;
2877 - }
2878 -
2879 - return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
2880 - QCA8K_TRUNK_HASH_MASK, hash);
2881 -}
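
qca8k_lag_setup_hash() above ORs source/destination MAC hashing for L2 and additionally source/destination IP hashing for L2+L3, and only allows one global hash mode. A standalone sketch of the flag selection; the bit values are placeholders, not the real QCA8K_TRUNK_HASH_* encoding:

#include <stdio.h>

#define HASH_SA_EN	(1u << 0)	/* placeholder bit positions */
#define HASH_DA_EN	(1u << 1)
#define HASH_SIP_EN	(1u << 2)
#define HASH_DIP_EN	(1u << 3)

enum lag_hash_type { LAG_HASH_L2, LAG_HASH_L23 };

static unsigned int lag_hash_flags(enum lag_hash_type type)
{
	unsigned int hash = 0;

	if (type == LAG_HASH_L23)
		hash |= HASH_SIP_EN | HASH_DIP_EN;	/* L3 fields...      */
	hash |= HASH_SA_EN | HASH_DA_EN;		/* ...on top of MACs */

	return hash;
}

int main(void)
{
	printf("L2  -> 0x%x\n", lag_hash_flags(LAG_HASH_L2));
	printf("L23 -> 0x%x\n", lag_hash_flags(LAG_HASH_L23));
	return 0;
}
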
2882 -
2883 -static int
2884 -qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
2885 - struct net_device *lag, bool delete)
2886 -{
2887 - struct qca8k_priv *priv = ds->priv;
2888 - int ret, id, i;
2889 - u32 val;
2890 -
2891 - id = dsa_lag_id(ds->dst, lag);
2892 -
2893 - /* Read current port member */
2894 - ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
2895 - if (ret)
2896 - return ret;
2897 -
2898 - /* Shift val to the correct trunk */
2899 - val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
2900 - val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
2901 - if (delete)
2902 - val &= ~BIT(port);
2903 - else
2904 - val |= BIT(port);
2905 -
2906 - /* Update port member. With empty portmap disable trunk */
2907 - ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
2908 - QCA8K_REG_GOL_TRUNK_MEMBER(id) |
2909 - QCA8K_REG_GOL_TRUNK_EN(id),
2910 - !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
2911 - val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
2912 -
2913 -	/* Search for an empty member if adding, or for the port if deleting */
2914 - for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
2915 - ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
2916 - if (ret)
2917 - return ret;
2918 -
2919 - val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
2920 - val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
2921 -
2922 - if (delete) {
2923 - /* If port flagged to be disabled assume this member is
2924 - * empty
2925 - */
2926 - if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2927 - continue;
2928 -
2929 - val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
2930 - if (val != port)
2931 - continue;
2932 - } else {
2933 - /* If port flagged to be enabled assume this member is
2934 - * already set
2935 - */
2936 - if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2937 - continue;
2938 - }
2939 -
2940 - /* We have found the member to add/remove */
2941 - break;
2942 - }
2943 -
2944 - /* Set port in the correct port mask or disable port if in delete mode */
2945 - return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
2946 - QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
2947 - QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
2948 - !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
2949 - port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
2950 -}
2951 -
2952 -static int
2953 -qca8k_port_lag_join(struct dsa_switch *ds, int port,
2954 - struct net_device *lag,
2955 - struct netdev_lag_upper_info *info)
2956 -{
2957 - int ret;
2958 -
2959 - if (!qca8k_lag_can_offload(ds, lag, info))
2960 - return -EOPNOTSUPP;
2961 -
2962 - ret = qca8k_lag_setup_hash(ds, lag, info);
2963 - if (ret)
2964 - return ret;
2965 -
2966 - return qca8k_lag_refresh_portmap(ds, port, lag, false);
2967 -}
2968 -
2969 -static int
2970 -qca8k_port_lag_leave(struct dsa_switch *ds, int port,
2971 - struct net_device *lag)
2972 -{
2973 - return qca8k_lag_refresh_portmap(ds, port, lag, true);
2974 -}
2975 -
2976 -static void
2977 -qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
2978 - bool operational)
2979 -{
2980 - struct dsa_port *dp = master->dsa_ptr;
2981 - struct qca8k_priv *priv = ds->priv;
2982 -
2983 - /* Ethernet MIB/MDIO is only supported for CPU port 0 */
2984 - if (dp->index != 0)
2985 - return;
2986 -
2987 - mutex_lock(&priv->mgmt_eth_data.mutex);
2988 - mutex_lock(&priv->mib_eth_data.mutex);
2989 -
2990 - priv->mgmt_master = operational ? (struct net_device *)master : NULL;
2991 -
2992 - mutex_unlock(&priv->mib_eth_data.mutex);
2993 - mutex_unlock(&priv->mgmt_eth_data.mutex);
2994 -}
2995 -
2996 -static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
2997 - enum dsa_tag_protocol proto)
2998 -{
2999 - struct qca_tagger_data *tagger_data;
3000 -
3001 - switch (proto) {
3002 - case DSA_TAG_PROTO_QCA:
3003 - tagger_data = ds->tagger_data;
3004 -
3005 - tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
3006 - tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
3007 -
3008 - break;
3009 - default:
3010 - return -EOPNOTSUPP;
3011 - }
3012 -
3013 - return 0;
3014 -}
3015 -
3016 -static const struct dsa_switch_ops qca8k_switch_ops = {
3017 - .get_tag_protocol = qca8k_get_tag_protocol,
3018 - .setup = qca8k_setup,
3019 - .get_strings = qca8k_get_strings,
3020 - .get_ethtool_stats = qca8k_get_ethtool_stats,
3021 - .get_sset_count = qca8k_get_sset_count,
3022 - .set_ageing_time = qca8k_set_ageing_time,
3023 - .get_mac_eee = qca8k_get_mac_eee,
3024 - .set_mac_eee = qca8k_set_mac_eee,
3025 - .port_enable = qca8k_port_enable,
3026 - .port_disable = qca8k_port_disable,
3027 - .port_change_mtu = qca8k_port_change_mtu,
3028 - .port_max_mtu = qca8k_port_max_mtu,
3029 - .port_stp_state_set = qca8k_port_stp_state_set,
3030 - .port_bridge_join = qca8k_port_bridge_join,
3031 - .port_bridge_leave = qca8k_port_bridge_leave,
3032 - .port_fast_age = qca8k_port_fast_age,
3033 - .port_fdb_add = qca8k_port_fdb_add,
3034 - .port_fdb_del = qca8k_port_fdb_del,
3035 - .port_fdb_dump = qca8k_port_fdb_dump,
3036 - .port_mdb_add = qca8k_port_mdb_add,
3037 - .port_mdb_del = qca8k_port_mdb_del,
3038 - .port_mirror_add = qca8k_port_mirror_add,
3039 - .port_mirror_del = qca8k_port_mirror_del,
3040 - .port_vlan_filtering = qca8k_port_vlan_filtering,
3041 - .port_vlan_add = qca8k_port_vlan_add,
3042 - .port_vlan_del = qca8k_port_vlan_del,
3043 - .phylink_validate = qca8k_phylink_validate,
3044 - .phylink_mac_link_state = qca8k_phylink_mac_link_state,
3045 - .phylink_mac_config = qca8k_phylink_mac_config,
3046 - .phylink_mac_link_down = qca8k_phylink_mac_link_down,
3047 - .phylink_mac_link_up = qca8k_phylink_mac_link_up,
3048 - .get_phy_flags = qca8k_get_phy_flags,
3049 - .port_lag_join = qca8k_port_lag_join,
3050 - .port_lag_leave = qca8k_port_lag_leave,
3051 - .master_state_change = qca8k_master_change,
3052 - .connect_tag_protocol = qca8k_connect_tag_protocol,
3053 -};
3054 -
3055 -static int qca8k_read_switch_id(struct qca8k_priv *priv)
3056 -{
3057 - u32 val;
3058 - u8 id;
3059 - int ret;
3060 -
3061 - if (!priv->info)
3062 - return -ENODEV;
3063 -
3064 - ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
3065 - if (ret < 0)
3066 - return -ENODEV;
3067 -
3068 - id = QCA8K_MASK_CTRL_DEVICE_ID(val);
3069 - if (id != priv->info->id) {
3070 - dev_err(priv->dev,
3071 - "Switch id detected %x but expected %x",
3072 - id, priv->info->id);
3073 - return -ENODEV;
3074 - }
3075 -
3076 - priv->switch_id = id;
3077 -
3078 - /* Save revision to communicate to the internal PHY driver */
3079 - priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
3080 -
3081 - return 0;
3082 -}
3083 -
3084 -static int
3085 -qca8k_sw_probe(struct mdio_device *mdiodev)
3086 -{
3087 - struct qca8k_priv *priv;
3088 - int ret;
3089 -
3090 -	/* allocate the private data struct so that we can probe the switch's
3091 - * ID register
3092 - */
3093 - priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
3094 - if (!priv)
3095 - return -ENOMEM;
3096 -
3097 - priv->info = of_device_get_match_data(priv->dev);
3098 - priv->bus = mdiodev->bus;
3099 - priv->dev = &mdiodev->dev;
3100 -
3101 - priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
3102 - GPIOD_ASIS);
3103 - if (IS_ERR(priv->reset_gpio))
3104 - return PTR_ERR(priv->reset_gpio);
3105 -
3106 - if (priv->reset_gpio) {
3107 - gpiod_set_value_cansleep(priv->reset_gpio, 1);
3108 - /* The active low duration must be greater than 10 ms
3109 - * and checkpatch.pl wants 20 ms.
3110 - */
3111 - msleep(20);
3112 - gpiod_set_value_cansleep(priv->reset_gpio, 0);
3113 - }
3114 -
3115 - /* Start by setting up the register mapping */
3116 - priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
3117 - &qca8k_regmap_config);
3118 - if (IS_ERR(priv->regmap)) {
3119 - dev_err(priv->dev, "regmap initialization failed");
3120 - return PTR_ERR(priv->regmap);
3121 - }
3122 -
3123 - priv->mdio_cache.page = 0xffff;
3124 - priv->mdio_cache.lo = 0xffff;
3125 - priv->mdio_cache.hi = 0xffff;
3126 -
3127 - /* Check the detected switch id */
3128 - ret = qca8k_read_switch_id(priv);
3129 - if (ret)
3130 - return ret;
3131 -
3132 - priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
3133 - if (!priv->ds)
3134 - return -ENOMEM;
3135 -
3136 - mutex_init(&priv->mgmt_eth_data.mutex);
3137 - init_completion(&priv->mgmt_eth_data.rw_done);
3138 -
3139 - mutex_init(&priv->mib_eth_data.mutex);
3140 - init_completion(&priv->mib_eth_data.rw_done);
3141 -
3142 - priv->ds->dev = &mdiodev->dev;
3143 - priv->ds->num_ports = QCA8K_NUM_PORTS;
3144 - priv->ds->priv = priv;
3145 - priv->ds->ops = &qca8k_switch_ops;
3146 - mutex_init(&priv->reg_mutex);
3147 - dev_set_drvdata(&mdiodev->dev, priv);
3148 -
3149 - return dsa_register_switch(priv->ds);
3150 -}
3151 -
3152 -static void
3153 -qca8k_sw_remove(struct mdio_device *mdiodev)
3154 -{
3155 - struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3156 - int i;
3157 -
3158 - if (!priv)
3159 - return;
3160 -
3161 - for (i = 0; i < QCA8K_NUM_PORTS; i++)
3162 - qca8k_port_set_status(priv, i, 0);
3163 -
3164 - dsa_unregister_switch(priv->ds);
3165 -
3166 - dev_set_drvdata(&mdiodev->dev, NULL);
3167 -}
3168 -
3169 -static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
3170 -{
3171 - struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3172 -
3173 - if (!priv)
3174 - return;
3175 -
3176 - dsa_switch_shutdown(priv->ds);
3177 -
3178 - dev_set_drvdata(&mdiodev->dev, NULL);
3179 -}
3180 -
3181 -#ifdef CONFIG_PM_SLEEP
3182 -static void
3183 -qca8k_set_pm(struct qca8k_priv *priv, int enable)
3184 -{
3185 - int port;
3186 -
3187 - for (port = 0; port < QCA8K_NUM_PORTS; port++) {
3188 - /* Do not enable on resume if the port was
3189 - * disabled before.
3190 - */
3191 - if (!(priv->port_enabled_map & BIT(port)))
3192 - continue;
3193 -
3194 - qca8k_port_set_status(priv, port, enable);
3195 - }
3196 -}
3197 -
3198 -static int qca8k_suspend(struct device *dev)
3199 -{
3200 - struct qca8k_priv *priv = dev_get_drvdata(dev);
3201 -
3202 - qca8k_set_pm(priv, 0);
3203 -
3204 - return dsa_switch_suspend(priv->ds);
3205 -}
3206 -
3207 -static int qca8k_resume(struct device *dev)
3208 -{
3209 - struct qca8k_priv *priv = dev_get_drvdata(dev);
3210 -
3211 - qca8k_set_pm(priv, 1);
3212 -
3213 - return dsa_switch_resume(priv->ds);
3214 -}
3215 -#endif /* CONFIG_PM_SLEEP */
3216 -
3217 -static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
3218 - qca8k_suspend, qca8k_resume);
3219 -
3220 -static const struct qca8k_info_ops qca8xxx_ops = {
3221 - .autocast_mib = qca8k_get_ethtool_stats_eth,
3222 -};
3223 -
3224 -static const struct qca8k_match_data qca8327 = {
3225 - .id = QCA8K_ID_QCA8327,
3226 - .reduced_package = true,
3227 - .mib_count = QCA8K_QCA832X_MIB_COUNT,
3228 - .ops = &qca8xxx_ops,
3229 -};
3230 -
3231 -static const struct qca8k_match_data qca8328 = {
3232 - .id = QCA8K_ID_QCA8327,
3233 - .mib_count = QCA8K_QCA832X_MIB_COUNT,
3234 - .ops = &qca8xxx_ops,
3235 -};
3236 -
3237 -static const struct qca8k_match_data qca833x = {
3238 - .id = QCA8K_ID_QCA8337,
3239 - .mib_count = QCA8K_QCA833X_MIB_COUNT,
3240 - .ops = &qca8xxx_ops,
3241 -};
3242 -
3243 -static const struct of_device_id qca8k_of_match[] = {
3244 - { .compatible = "qca,qca8327", .data = &qca8327 },
3245 - { .compatible = "qca,qca8328", .data = &qca8328 },
3246 - { .compatible = "qca,qca8334", .data = &qca833x },
3247 - { .compatible = "qca,qca8337", .data = &qca833x },
3248 - { /* sentinel */ },
3249 -};
3250 -
3251 -static struct mdio_driver qca8kmdio_driver = {
3252 - .probe = qca8k_sw_probe,
3253 - .remove = qca8k_sw_remove,
3254 - .shutdown = qca8k_sw_shutdown,
3255 - .mdiodrv.driver = {
3256 - .name = "qca8k",
3257 - .of_match_table = qca8k_of_match,
3258 - .pm = &qca8k_pm_ops,
3259 - },
3260 -};
3261 -
3262 -mdio_module_driver(qca8kmdio_driver);
3263 -
3264 -MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
3265 -MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
3266 -MODULE_LICENSE("GPL v2");
3267 -MODULE_ALIAS("platform:qca8k");
3268 --- /dev/null
3269 +++ b/drivers/net/dsa/qca/qca8k-8xxx.c
3270 @@ -0,0 +1,3186 @@
3271 +// SPDX-License-Identifier: GPL-2.0
3272 +/*
3273 + * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
3274 + * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
3275 + * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
3276 + * Copyright (c) 2016 John Crispin <john@phrozen.org>
3277 + */
3278 +
3279 +#include <linux/module.h>
3280 +#include <linux/phy.h>
3281 +#include <linux/netdevice.h>
3282 +#include <linux/bitfield.h>
3283 +#include <linux/regmap.h>
3284 +#include <net/dsa.h>
3285 +#include <linux/of_net.h>
3286 +#include <linux/of_mdio.h>
3287 +#include <linux/of_platform.h>
3288 +#include <linux/if_bridge.h>
3289 +#include <linux/mdio.h>
3290 +#include <linux/phylink.h>
3291 +#include <linux/gpio/consumer.h>
3292 +#include <linux/etherdevice.h>
3293 +#include <linux/dsa/tag_qca.h>
3294 +
3295 +#include "qca8k.h"
3296 +
3297 +static void
3298 +qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
3299 +{
3300 + regaddr >>= 1;
3301 + *r1 = regaddr & 0x1e;
3302 +
3303 + regaddr >>= 5;
3304 + *r2 = regaddr & 0x7;
3305 +
3306 + regaddr >>= 3;
3307 + *page = regaddr & 0x3ff;
3308 +}
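
qca8k_split_addr() above derives the three MDIO coordinates (page, r2, r1) from a single 32-bit register offset. The same arithmetic as a standalone program, with an arbitrary example offset:

#include <stdint.h>
#include <stdio.h>

static void split_addr(uint32_t regaddr, uint16_t *r1, uint16_t *r2,
		       uint16_t *page)
{
	regaddr >>= 1;
	*r1 = regaddr & 0x1e;

	regaddr >>= 5;
	*r2 = regaddr & 0x7;

	regaddr >>= 3;
	*page = regaddr & 0x3ff;
}

int main(void)
{
	uint16_t r1, r2, page;

	split_addr(0x100, &r1, &r2, &page);	/* arbitrary register offset */
	printf("reg 0x100 -> page 0x%x r2 0x%x r1 0x%x\n", page, r2, r1);
	return 0;
}
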
3309 +
3310 +static int
3311 +qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
3312 +{
3313 + u16 *cached_lo = &priv->mdio_cache.lo;
3314 + struct mii_bus *bus = priv->bus;
3315 + int ret;
3316 +
3317 + if (lo == *cached_lo)
3318 + return 0;
3319 +
3320 + ret = bus->write(bus, phy_id, regnum, lo);
3321 + if (ret < 0)
3322 + dev_err_ratelimited(&bus->dev,
3323 + "failed to write qca8k 32bit lo register\n");
3324 +
3325 + *cached_lo = lo;
3326 + return 0;
3327 +}
3328 +
3329 +static int
3330 +qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
3331 +{
3332 + u16 *cached_hi = &priv->mdio_cache.hi;
3333 + struct mii_bus *bus = priv->bus;
3334 + int ret;
3335 +
3336 + if (hi == *cached_hi)
3337 + return 0;
3338 +
3339 + ret = bus->write(bus, phy_id, regnum, hi);
3340 + if (ret < 0)
3341 + dev_err_ratelimited(&bus->dev,
3342 + "failed to write qca8k 32bit hi register\n");
3343 +
3344 + *cached_hi = hi;
3345 + return 0;
3346 +}
3347 +
3348 +static int
3349 +qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
3350 +{
3351 + int ret;
3352 +
3353 + ret = bus->read(bus, phy_id, regnum);
3354 + if (ret >= 0) {
3355 + *val = ret;
3356 + ret = bus->read(bus, phy_id, regnum + 1);
3357 + *val |= ret << 16;
3358 + }
3359 +
3360 + if (ret < 0) {
3361 + dev_err_ratelimited(&bus->dev,
3362 + "failed to read qca8k 32bit register\n");
3363 + *val = 0;
3364 + return ret;
3365 + }
3366 +
3367 + return 0;
3368 +}
3369 +
3370 +static void
3371 +qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
3372 +{
3373 + u16 lo, hi;
3374 + int ret;
3375 +
3376 + lo = val & 0xffff;
3377 + hi = (u16)(val >> 16);
3378 +
3379 + ret = qca8k_set_lo(priv, phy_id, regnum, lo);
3380 + if (ret >= 0)
3381 + ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
3382 +}
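
The helpers above move a 32-bit switch register over a pair of 16-bit MDIO registers, low half at regnum and high half at regnum + 1 (with both halves cached to skip redundant writes). A standalone sketch of the split and reassembly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0x12345678;	/* arbitrary register value */
	uint16_t lo = val & 0xffff;	/* written at regnum        */
	uint16_t hi = val >> 16;	/* written at regnum + 1    */
	uint32_t readback = lo | ((uint32_t)hi << 16);

	printf("lo=0x%04x hi=0x%04x readback=0x%08x\n", lo, hi, readback);
	return 0;
}
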
3383 +
3384 +static int
3385 +qca8k_set_page(struct qca8k_priv *priv, u16 page)
3386 +{
3387 + u16 *cached_page = &priv->mdio_cache.page;
3388 + struct mii_bus *bus = priv->bus;
3389 + int ret;
3390 +
3391 + if (page == *cached_page)
3392 + return 0;
3393 +
3394 + ret = bus->write(bus, 0x18, 0, page);
3395 + if (ret < 0) {
3396 + dev_err_ratelimited(&bus->dev,
3397 + "failed to set qca8k page\n");
3398 + return ret;
3399 + }
3400 +
3401 + *cached_page = page;
3402 + usleep_range(1000, 2000);
3403 + return 0;
3404 +}
3405 +
3406 +static int
3407 +qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
3408 +{
3409 + return regmap_read(priv->regmap, reg, val);
3410 +}
3411 +
3412 +static int
3413 +qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
3414 +{
3415 + return regmap_write(priv->regmap, reg, val);
3416 +}
3417 +
3418 +static int
3419 +qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
3420 +{
3421 + return regmap_update_bits(priv->regmap, reg, mask, write_val);
3422 +}
3423 +
3424 +static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
3425 +{
3426 + struct qca8k_mgmt_eth_data *mgmt_eth_data;
3427 + struct qca8k_priv *priv = ds->priv;
3428 + struct qca_mgmt_ethhdr *mgmt_ethhdr;
3429 + u8 len, cmd;
3430 +
3431 + mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
3432 + mgmt_eth_data = &priv->mgmt_eth_data;
3433 +
3434 + cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
3435 + len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
3436 +
3437 +	/* Make sure the seq matches the requested packet */
3438 + if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
3439 + mgmt_eth_data->ack = true;
3440 +
3441 + if (cmd == MDIO_READ) {
3442 + mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
3443 +
3444 +		/* Get the rest of the 12 bytes of data.
3445 + * The read/write function will extract the requested data.
3446 + */
3447 + if (len > QCA_HDR_MGMT_DATA1_LEN)
3448 + memcpy(mgmt_eth_data->data + 1, skb->data,
3449 + QCA_HDR_MGMT_DATA2_LEN);
3450 + }
3451 +
3452 + complete(&mgmt_eth_data->rw_done);
3453 +}
3454 +
3455 +static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
3456 + int priority, unsigned int len)
3457 +{
3458 + struct qca_mgmt_ethhdr *mgmt_ethhdr;
3459 + unsigned int real_len;
3460 + struct sk_buff *skb;
3461 + u32 *data2;
3462 + u16 hdr;
3463 +
3464 + skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
3465 + if (!skb)
3466 + return NULL;
3467 +
3468 +	/* The max value for the len reg is 15 (0xf) but the switch actually returns 16 bytes.
3469 +	 * For some reason the steps are:
3470 +	 * 0: nothing
3471 +	 * 1-4: first 4 bytes
3472 +	 * 5-6: first 12 bytes
3473 +	 * 7-15: all 16 bytes
3474 + */
3475 + if (len == 16)
3476 + real_len = 15;
3477 + else
3478 + real_len = len;
3479 +
3480 + skb_reset_mac_header(skb);
3481 + skb_set_network_header(skb, skb->len);
3482 +
3483 + mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
3484 +
3485 + hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
3486 + hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
3487 + hdr |= QCA_HDR_XMIT_FROM_CPU;
3488 + hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
3489 + hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
3490 +
3491 + mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
3492 + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
3493 + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
3494 + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
3495 + QCA_HDR_MGMT_CHECK_CODE_VAL);
3496 +
3497 + if (cmd == MDIO_WRITE)
3498 + mgmt_ethhdr->mdio_data = *val;
3499 +
3500 + mgmt_ethhdr->hdr = htons(hdr);
3501 +
3502 + data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
3503 + if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
3504 + memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
3505 +
3506 + return skb;
3507 +}
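
As the comment in qca8k_alloc_mdio_header() notes, the 4-bit length field cannot encode 16, so a full 16-byte request is written as 15. A one-line sketch of that quirk:

#include <stdio.h>

static unsigned int mgmt_real_len(unsigned int len)
{
	return len == 16 ? 15 : len;	/* 4-bit field, 16 is encoded as 15 */
}

int main(void)
{
	printf("len 4  -> field %u\n", mgmt_real_len(4));
	printf("len 16 -> field %u\n", mgmt_real_len(16));
	return 0;
}
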
3508 +
3509 +static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
3510 +{
3511 + struct qca_mgmt_ethhdr *mgmt_ethhdr;
3512 +
3513 + mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
3514 + mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
3515 +}
3516 +
3517 +static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
3518 +{
3519 + struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
3520 + struct sk_buff *skb;
3521 + bool ack;
3522 + int ret;
3523 +
3524 + skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
3525 + QCA8K_ETHERNET_MDIO_PRIORITY, len);
3526 + if (!skb)
3527 + return -ENOMEM;
3528 +
3529 + mutex_lock(&mgmt_eth_data->mutex);
3530 +
3531 +	/* Check if the mgmt_master is operational */
3532 + if (!priv->mgmt_master) {
3533 + kfree_skb(skb);
3534 + mutex_unlock(&mgmt_eth_data->mutex);
3535 + return -EINVAL;
3536 + }
3537 +
3538 + skb->dev = priv->mgmt_master;
3539 +
3540 + reinit_completion(&mgmt_eth_data->rw_done);
3541 +
3542 + /* Increment seq_num and set it in the mdio pkt */
3543 + mgmt_eth_data->seq++;
3544 + qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
3545 + mgmt_eth_data->ack = false;
3546 +
3547 + dev_queue_xmit(skb);
3548 +
3549 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
3550 + msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
3551 +
3552 + *val = mgmt_eth_data->data[0];
3553 + if (len > QCA_HDR_MGMT_DATA1_LEN)
3554 + memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
3555 +
3556 + ack = mgmt_eth_data->ack;
3557 +
3558 + mutex_unlock(&mgmt_eth_data->mutex);
3559 +
3560 + if (ret <= 0)
3561 + return -ETIMEDOUT;
3562 +
3563 + if (!ack)
3564 + return -EINVAL;
3565 +
3566 + return 0;
3567 +}
3568 +
3569 +static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
3570 +{
3571 + struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
3572 + struct sk_buff *skb;
3573 + bool ack;
3574 + int ret;
3575 +
3576 + skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
3577 + QCA8K_ETHERNET_MDIO_PRIORITY, len);
3578 + if (!skb)
3579 + return -ENOMEM;
3580 +
3581 + mutex_lock(&mgmt_eth_data->mutex);
3582 +
3583 +	/* Check if the mgmt_master is operational */
3584 + if (!priv->mgmt_master) {
3585 + kfree_skb(skb);
3586 + mutex_unlock(&mgmt_eth_data->mutex);
3587 + return -EINVAL;
3588 + }
3589 +
3590 + skb->dev = priv->mgmt_master;
3591 +
3592 + reinit_completion(&mgmt_eth_data->rw_done);
3593 +
3594 + /* Increment seq_num and set it in the mdio pkt */
3595 + mgmt_eth_data->seq++;
3596 + qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
3597 + mgmt_eth_data->ack = false;
3598 +
3599 + dev_queue_xmit(skb);
3600 +
3601 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
3602 + msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
3603 +
3604 + ack = mgmt_eth_data->ack;
3605 +
3606 + mutex_unlock(&mgmt_eth_data->mutex);
3607 +
3608 + if (ret <= 0)
3609 + return -ETIMEDOUT;
3610 +
3611 + if (!ack)
3612 + return -EINVAL;
3613 +
3614 + return 0;
3615 +}
3616 +
3617 +static int
3618 +qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
3619 +{
3620 + u32 val = 0;
3621 + int ret;
3622 +
3623 + ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
3624 + if (ret)
3625 + return ret;
3626 +
3627 + val &= ~mask;
3628 + val |= write_val;
3629 +
3630 + return qca8k_write_eth(priv, reg, &val, sizeof(val));
3631 +}
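
qca8k_regmap_update_bits_eth() above is a plain read-modify-write: clear the masked bits, OR in the new value, write the result back. The same bit manipulation in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0xffff0000;	/* pretend current register value */
	uint32_t mask = 0x00ff00ff;	/* bits allowed to change         */
	uint32_t write_val = 0x00120034;

	val &= ~mask;
	val |= write_val;
	printf("result = 0x%08x\n", val);	/* 0xff120034 */
	return 0;
}
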
3632 +
3633 +static int
3634 +qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
3635 +{
3636 + int i, count = len / sizeof(u32), ret;
3637 +
3638 + if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
3639 + return 0;
3640 +
3641 + for (i = 0; i < count; i++) {
3642 + ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
3643 + if (ret < 0)
3644 + return ret;
3645 + }
3646 +
3647 + return 0;
3648 +}
3649 +
3650 +static int
3651 +qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
3652 +{
3653 + int i, count = len / sizeof(u32), ret;
3654 + u32 tmp;
3655 +
3656 + if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
3657 + return 0;
3658 +
3659 + for (i = 0; i < count; i++) {
3660 + tmp = val[i];
3661 +
3662 + ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
3663 + if (ret < 0)
3664 + return ret;
3665 + }
3666 +
3667 + return 0;
3668 +}
3669 +
3670 +static int
3671 +qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
3672 +{
3673 + struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
3674 + struct mii_bus *bus = priv->bus;
3675 + u16 r1, r2, page;
3676 + int ret;
3677 +
3678 + if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
3679 + return 0;
3680 +
3681 + qca8k_split_addr(reg, &r1, &r2, &page);
3682 +
3683 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
3684 +
3685 + ret = qca8k_set_page(priv, page);
3686 + if (ret < 0)
3687 + goto exit;
3688 +
3689 + ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
3690 +
3691 +exit:
3692 + mutex_unlock(&bus->mdio_lock);
3693 + return ret;
3694 +}
3695 +
3696 +static int
3697 +qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
3698 +{
3699 + struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
3700 + struct mii_bus *bus = priv->bus;
3701 + u16 r1, r2, page;
3702 + int ret;
3703 +
3704 + if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
3705 + return 0;
3706 +
3707 + qca8k_split_addr(reg, &r1, &r2, &page);
3708 +
3709 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
3710 +
3711 + ret = qca8k_set_page(priv, page);
3712 + if (ret < 0)
3713 + goto exit;
3714 +
3715 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
3716 +
3717 +exit:
3718 + mutex_unlock(&bus->mdio_lock);
3719 + return ret;
3720 +}
3721 +
3722 +static int
3723 +qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
3724 +{
3725 + struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
3726 + struct mii_bus *bus = priv->bus;
3727 + u16 r1, r2, page;
3728 + u32 val;
3729 + int ret;
3730 +
3731 + if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
3732 + return 0;
3733 +
3734 + qca8k_split_addr(reg, &r1, &r2, &page);
3735 +
3736 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
3737 +
3738 + ret = qca8k_set_page(priv, page);
3739 + if (ret < 0)
3740 + goto exit;
3741 +
3742 + ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
3743 + if (ret < 0)
3744 + goto exit;
3745 +
3746 + val &= ~mask;
3747 + val |= write_val;
3748 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
3749 +
3750 +exit:
3751 + mutex_unlock(&bus->mdio_lock);
3752 +
3753 + return ret;
3754 +}
3755 +
3756 +static const struct regmap_range qca8k_readable_ranges[] = {
3757 + regmap_reg_range(0x0000, 0x00e4), /* Global control */
3758 + regmap_reg_range(0x0100, 0x0168), /* EEE control */
3759 + regmap_reg_range(0x0200, 0x0270), /* Parser control */
3760 + regmap_reg_range(0x0400, 0x0454), /* ACL */
3761 + regmap_reg_range(0x0600, 0x0718), /* Lookup */
3762 + regmap_reg_range(0x0800, 0x0b70), /* QM */
3763 + regmap_reg_range(0x0c00, 0x0c80), /* PKT */
3764 + regmap_reg_range(0x0e00, 0x0e98), /* L3 */
3765 + regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
3766 + regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
3767 + regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
3768 + regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
3769 + regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
3770 + regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
3771 + regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
3772 +
3773 +};
3774 +
3775 +static const struct regmap_access_table qca8k_readable_table = {
3776 + .yes_ranges = qca8k_readable_ranges,
3777 + .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
3778 +};
3779 +
3780 +static struct regmap_config qca8k_regmap_config = {
3781 + .reg_bits = 16,
3782 + .val_bits = 32,
3783 + .reg_stride = 4,
3784 + .max_register = 0x16ac, /* end MIB - Port6 range */
3785 + .reg_read = qca8k_regmap_read,
3786 + .reg_write = qca8k_regmap_write,
3787 + .reg_update_bits = qca8k_regmap_update_bits,
3788 + .rd_table = &qca8k_readable_table,
3789 + .disable_locking = true, /* Locking is handled by qca8k read/write */
3790 + .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
3791 +};
3792 +
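+/* Poll reg until the bits in mask clear, or QCA8K_BUSY_WAIT_TIMEOUT expires */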
3793 +static int
3794 +qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
3795 +{
3796 + u32 val;
3797 +
3798 + return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
3799 + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
3800 +}
3801 +
3802 +static int
3803 +qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
3804 +{
3805 + u32 reg[3];
3806 + int ret;
3807 +
3808 + /* load the ARL table into an array */
3809 + ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
3810 + if (ret)
3811 + return ret;
3812 +
3813 + /* vid - 83:72 */
3814 + fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
3815 + /* aging - 67:64 */
3816 + fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
3817 + /* portmask - 54:48 */
3818 + fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
3819 + /* mac - 47:0 */
3820 + fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
3821 + fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
3822 + fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
3823 + fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
3824 + fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
3825 + fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
3826 +
3827 + return 0;
3828 +}
3829 +
3830 +static void
3831 +qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
3832 + u8 aging)
3833 +{
3834 + u32 reg[3] = { 0 };
3835 +
3836 + /* vid - 83:72 */
3837 + reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
3838 + /* aging - 67:64 */
3839 + reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
3840 + /* portmask - 54:48 */
3841 + reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
3842 + /* mac - 47:0 */
3843 + reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
3844 + reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
3845 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
3846 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
3847 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
3848 + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
3849 +
3850 + /* load the array into the ARL table */
3851 + qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
3852 +}
3853 +
3854 +static int
3855 +qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
3856 +{
3857 + u32 reg;
3858 + int ret;
3859 +
3860 + /* Set the command and FDB index */
3861 + reg = QCA8K_ATU_FUNC_BUSY;
3862 + reg |= cmd;
3863 + if (port >= 0) {
3864 + reg |= QCA8K_ATU_FUNC_PORT_EN;
3865 + reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
3866 + }
3867 +
3868 + /* Write the function register triggering the table access */
3869 + ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
3870 + if (ret)
3871 + return ret;
3872 +
3873 + /* wait for completion */
3874 + ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
3875 + if (ret)
3876 + return ret;
3877 +
3878 + /* Check for table full violation when adding an entry */
3879 + if (cmd == QCA8K_FDB_LOAD) {
3880 + ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
3881 + if (ret < 0)
3882 + return ret;
3883 + if (reg & QCA8K_ATU_FUNC_FULL)
3884 + return -1;
3885 + }
3886 +
3887 + return 0;
3888 +}
3889 +
3890 +static int
3891 +qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
3892 +{
3893 + int ret;
3894 +
3895 + qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
3896 + ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
3897 + if (ret < 0)
3898 + return ret;
3899 +
3900 + return qca8k_fdb_read(priv, fdb);
3901 +}
3902 +
3903 +static int
3904 +qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
3905 + u16 vid, u8 aging)
3906 +{
3907 + int ret;
3908 +
3909 + mutex_lock(&priv->reg_mutex);
3910 + qca8k_fdb_write(priv, vid, port_mask, mac, aging);
3911 + ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
3912 + mutex_unlock(&priv->reg_mutex);
3913 +
3914 + return ret;
3915 +}
3916 +
3917 +static int
3918 +qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
3919 +{
3920 + int ret;
3921 +
3922 + mutex_lock(&priv->reg_mutex);
3923 + qca8k_fdb_write(priv, vid, port_mask, mac, 0);
3924 + ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
3925 + mutex_unlock(&priv->reg_mutex);
3926 +
3927 + return ret;
3928 +}
3929 +
3930 +static void
3931 +qca8k_fdb_flush(struct qca8k_priv *priv)
3932 +{
3933 + mutex_lock(&priv->reg_mutex);
3934 + qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
3935 + mutex_unlock(&priv->reg_mutex);
3936 +}
3937 +
3938 +static int
3939 +qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
3940 + const u8 *mac, u16 vid)
3941 +{
3942 + struct qca8k_fdb fdb = { 0 };
3943 + int ret;
3944 +
3945 + mutex_lock(&priv->reg_mutex);
3946 +
3947 + qca8k_fdb_write(priv, vid, 0, mac, 0);
3948 + ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
3949 + if (ret < 0)
3950 + goto exit;
3951 +
3952 + ret = qca8k_fdb_read(priv, &fdb);
3953 + if (ret < 0)
3954 + goto exit;
3955 +
3956 +	/* Rule exists. Delete it first */
3957 + if (!fdb.aging) {
3958 + ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
3959 + if (ret)
3960 + goto exit;
3961 + }
3962 +
3963 + /* Add port to fdb portmask */
3964 + fdb.port_mask |= port_mask;
3965 +
3966 + qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
3967 + ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
3968 +
3969 +exit:
3970 + mutex_unlock(&priv->reg_mutex);
3971 + return ret;
3972 +}
3973 +
3974 +static int
3975 +qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
3976 + const u8 *mac, u16 vid)
3977 +{
3978 + struct qca8k_fdb fdb = { 0 };
3979 + int ret;
3980 +
3981 + mutex_lock(&priv->reg_mutex);
3982 +
3983 + qca8k_fdb_write(priv, vid, 0, mac, 0);
3984 + ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
3985 + if (ret < 0)
3986 + goto exit;
3987 +
3988 + /* Rule doesn't exist. Why delete? */
3989 + if (!fdb.aging) {
3990 + ret = -EINVAL;
3991 + goto exit;
3992 + }
3993 +
3994 + ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
3995 + if (ret)
3996 + goto exit;
3997 +
3998 +	/* This port is the only port in the rule. Don't re-insert */
3999 + if (fdb.port_mask == port_mask)
4000 + goto exit;
4001 +
4002 + /* Remove port from port mask */
4003 + fdb.port_mask &= ~port_mask;
4004 +
4005 + qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
4006 + ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
4007 +
4008 +exit:
4009 + mutex_unlock(&priv->reg_mutex);
4010 + return ret;
4011 +}
4012 +
4013 +static int
4014 +qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
4015 +{
4016 + u32 reg;
4017 + int ret;
4018 +
4019 + /* Set the command and VLAN index */
4020 + reg = QCA8K_VTU_FUNC1_BUSY;
4021 + reg |= cmd;
4022 + reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
4023 +
4024 + /* Write the function register triggering the table access */
4025 + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
4026 + if (ret)
4027 + return ret;
4028 +
4029 + /* wait for completion */
4030 + ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
4031 + if (ret)
4032 + return ret;
4033 +
4034 + /* Check for table full violation when adding an entry */
4035 + if (cmd == QCA8K_VLAN_LOAD) {
4036 + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
4037 + if (ret < 0)
4038 + return ret;
4039 + if (reg & QCA8K_VTU_FUNC1_FULL)
4040 + return -ENOMEM;
4041 + }
4042 +
4043 + return 0;
4044 +}
4045 +
4046 +static int
4047 +qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
4048 +{
4049 + u32 reg;
4050 + int ret;
4051 +
4052 +	/* We do the right thing with VLAN 0 and treat it as untagged
4053 +	 * while preserving the tag on egress.
4054 +	 */
4056 + if (vid == 0)
4057 + return 0;
4058 +
4059 + mutex_lock(&priv->reg_mutex);
4060 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
4061 + if (ret < 0)
4062 + goto out;
4063 +
4064 + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
4065 + if (ret < 0)
4066 + goto out;
4067 + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
4068 + reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
4069 + if (untagged)
4070 + reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
4071 + else
4072 + reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
4073 +
4074 + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
4075 + if (ret)
4076 + goto out;
4077 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
4078 +
4079 +out:
4080 + mutex_unlock(&priv->reg_mutex);
4081 +
4082 + return ret;
4083 +}
4084 +
4085 +static int
4086 +qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
4087 +{
4088 + u32 reg, mask;
4089 + int ret, i;
4090 + bool del;
4091 +
4092 + mutex_lock(&priv->reg_mutex);
4093 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
4094 + if (ret < 0)
4095 + goto out;
4096 +
4097 + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
4098 + if (ret < 0)
4099 + goto out;
4100 + reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
4101 + reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
4102 +
4103 + /* Check if we're the last member to be removed */
4104 + del = true;
4105 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
4106 + mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
4107 +
4108 + if ((reg & mask) != mask) {
4109 + del = false;
4110 + break;
4111 + }
4112 + }
4113 +
4114 + if (del) {
4115 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
4116 + } else {
4117 + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
4118 + if (ret)
4119 + goto out;
4120 + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
4121 + }
4122 +
4123 +out:
4124 + mutex_unlock(&priv->reg_mutex);
4125 +
4126 + return ret;
4127 +}
4128 +
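+/* Flush the MIB counters, set the CPU keep bit and enable the MIB module */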
4129 +static int
4130 +qca8k_mib_init(struct qca8k_priv *priv)
4131 +{
4132 + int ret;
4133 +
4134 + mutex_lock(&priv->reg_mutex);
4135 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
4136 + QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
4137 + FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
4138 + QCA8K_MIB_BUSY);
4139 + if (ret)
4140 + goto exit;
4141 +
4142 + ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
4143 + if (ret)
4144 + goto exit;
4145 +
4146 + ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
4147 + if (ret)
4148 + goto exit;
4149 +
4150 + ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
4151 +
4152 +exit:
4153 + mutex_unlock(&priv->reg_mutex);
4154 + return ret;
4155 +}
4156 +
4157 +static void
4158 +qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
4159 +{
4160 + u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
4161 +
4162 + /* Port 0 and 6 have no internal PHY */
4163 + if (port > 0 && port < 6)
4164 + mask |= QCA8K_PORT_STATUS_LINK_AUTO;
4165 +
4166 + if (enable)
4167 + regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
4168 + else
4169 + regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
4170 +}
4171 +
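+/* Re-send a copy of the prepared MDIO master read packet over the mgmt
+ * Ethernet interface and return the polled register value in val.
+ */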
4172 +static int
4173 +qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
4174 + struct sk_buff *read_skb, u32 *val)
4175 +{
4176 + struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
4177 + bool ack;
4178 + int ret;
4179 +
4180 + reinit_completion(&mgmt_eth_data->rw_done);
4181 +
4182 + /* Increment seq_num and set it in the copy pkt */
4183 + mgmt_eth_data->seq++;
4184 + qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
4185 + mgmt_eth_data->ack = false;
4186 +
4187 + dev_queue_xmit(skb);
4188 +
4189 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4190 + QCA8K_ETHERNET_TIMEOUT);
4191 +
4192 + ack = mgmt_eth_data->ack;
4193 +
4194 + if (ret <= 0)
4195 + return -ETIMEDOUT;
4196 +
4197 + if (!ack)
4198 + return -EINVAL;
4199 +
4200 + *val = mgmt_eth_data->data[0];
4201 +
4202 + return 0;
4203 +}
4204 +
4205 +static int
4206 +qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
4207 + int regnum, u16 data)
4208 +{
4209 + struct sk_buff *write_skb, *clear_skb, *read_skb;
4210 + struct qca8k_mgmt_eth_data *mgmt_eth_data;
4211 + u32 write_val, clear_val = 0, val;
4212 + struct net_device *mgmt_master;
4213 + int ret, ret1;
4214 + bool ack;
4215 +
4216 + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
4217 + return -EINVAL;
4218 +
4219 + mgmt_eth_data = &priv->mgmt_eth_data;
4220 +
4221 + write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
4222 + QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
4223 + QCA8K_MDIO_MASTER_REG_ADDR(regnum);
4224 +
4225 + if (read) {
4226 + write_val |= QCA8K_MDIO_MASTER_READ;
4227 + } else {
4228 + write_val |= QCA8K_MDIO_MASTER_WRITE;
4229 + write_val |= QCA8K_MDIO_MASTER_DATA(data);
4230 + }
4231 +
4232 + /* Prealloc all the needed skb before the lock */
4233 + write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
4234 + QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
4235 + if (!write_skb)
4236 + return -ENOMEM;
4237 +
4238 + clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
4239 + QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
4240 + if (!clear_skb) {
4241 + ret = -ENOMEM;
4242 + goto err_clear_skb;
4243 + }
4244 +
4245 + read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
4246 + QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
4247 + if (!read_skb) {
4248 + ret = -ENOMEM;
4249 + goto err_read_skb;
4250 + }
4251 +
4252 + /* Actually start the request:
4253 + * 1. Send mdio master packet
4254 + * 2. Busy Wait for mdio master command
4255 + * 3. Get the data if we are reading
4256 + * 4. Reset the mdio master (even with error)
4257 + */
4258 + mutex_lock(&mgmt_eth_data->mutex);
4259 +
4260 + /* Check if mgmt_master is operational */
4261 + mgmt_master = priv->mgmt_master;
4262 + if (!mgmt_master) {
4263 + mutex_unlock(&mgmt_eth_data->mutex);
4264 + ret = -EINVAL;
4265 + goto err_mgmt_master;
4266 + }
4267 +
4268 + read_skb->dev = mgmt_master;
4269 + clear_skb->dev = mgmt_master;
4270 + write_skb->dev = mgmt_master;
4271 +
4272 + reinit_completion(&mgmt_eth_data->rw_done);
4273 +
4274 + /* Increment seq_num and set it in the write pkt */
4275 + mgmt_eth_data->seq++;
4276 + qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
4277 + mgmt_eth_data->ack = false;
4278 +
4279 + dev_queue_xmit(write_skb);
4280 +
4281 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4282 + QCA8K_ETHERNET_TIMEOUT);
4283 +
4284 + ack = mgmt_eth_data->ack;
4285 +
4286 + if (ret <= 0) {
4287 + ret = -ETIMEDOUT;
4288 + kfree_skb(read_skb);
4289 + goto exit;
4290 + }
4291 +
4292 + if (!ack) {
4293 + ret = -EINVAL;
4294 + kfree_skb(read_skb);
4295 + goto exit;
4296 + }
4297 +
4298 + ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
4299 + !(val & QCA8K_MDIO_MASTER_BUSY), 0,
4300 + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
4301 + mgmt_eth_data, read_skb, &val);
4302 +
4303 + if (ret < 0 && ret1 < 0) {
4304 + ret = ret1;
4305 + goto exit;
4306 + }
4307 +
4308 + if (read) {
4309 + reinit_completion(&mgmt_eth_data->rw_done);
4310 +
4311 + /* Increment seq_num and set it in the read pkt */
4312 + mgmt_eth_data->seq++;
4313 + qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
4314 + mgmt_eth_data->ack = false;
4315 +
4316 + dev_queue_xmit(read_skb);
4317 +
4318 + ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4319 + QCA8K_ETHERNET_TIMEOUT);
4320 +
4321 + ack = mgmt_eth_data->ack;
4322 +
4323 + if (ret <= 0) {
4324 + ret = -ETIMEDOUT;
4325 + goto exit;
4326 + }
4327 +
4328 + if (!ack) {
4329 + ret = -EINVAL;
4330 + goto exit;
4331 + }
4332 +
4333 + ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
4334 + } else {
4335 + kfree_skb(read_skb);
4336 + }
4337 +exit:
4338 + reinit_completion(&mgmt_eth_data->rw_done);
4339 +
4340 + /* Increment seq_num and set it in the clear pkt */
4341 + mgmt_eth_data->seq++;
4342 + qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
4343 + mgmt_eth_data->ack = false;
4344 +
4345 + dev_queue_xmit(clear_skb);
4346 +
4347 + wait_for_completion_timeout(&mgmt_eth_data->rw_done,
4348 + QCA8K_ETHERNET_TIMEOUT);
4349 +
4350 + mutex_unlock(&mgmt_eth_data->mutex);
4351 +
4352 + return ret;
4353 +
4354 + /* Error handling before lock */
4355 +err_mgmt_master:
4356 + kfree_skb(read_skb);
4357 +err_read_skb:
4358 + kfree_skb(clear_skb);
4359 +err_clear_skb:
4360 + kfree_skb(write_skb);
4361 +
4362 + return ret;
4363 +}
4364 +
4365 +static u32
4366 +qca8k_port_to_phy(int port)
4367 +{
4368 + /* From Andrew Lunn:
4369 + * Port 0 has no internal phy.
4370 + * Port 1 has an internal PHY at MDIO address 0.
4371 + * Port 2 has an internal PHY at MDIO address 1.
4372 + * ...
4373 + * Port 5 has an internal PHY at MDIO address 4.
4374 + * Port 6 has no internal PHY.
4375 + */
4376 +
4377 + return port - 1;
4378 +}
4379 +
4380 +static int
4381 +qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
4382 +{
4383 + u16 r1, r2, page;
4384 + u32 val;
4385 + int ret, ret1;
4386 +
4387 + qca8k_split_addr(reg, &r1, &r2, &page);
4388 +
4389 + ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
4390 + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
4391 + bus, 0x10 | r2, r1, &val);
4392 +
4393 + /* Check if qca8k_read has failed for a different reason
4394 +	 * before returning -ETIMEDOUT
4395 + */
4396 + if (ret < 0 && ret1 < 0)
4397 + return ret1;
4398 +
4399 + return ret;
4400 +}
4401 +
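+/* Access a PHY register through the switch MDIO master via the page-based
+ * MDIO bus; used as a fallback when the Ethernet mgmt protocol is unavailable.
+ */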
4402 +static int
4403 +qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
4404 +{
4405 + struct mii_bus *bus = priv->bus;
4406 + u16 r1, r2, page;
4407 + u32 val;
4408 + int ret;
4409 +
4410 + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
4411 + return -EINVAL;
4412 +
4413 + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
4414 + QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
4415 + QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
4416 + QCA8K_MDIO_MASTER_DATA(data);
4417 +
4418 + qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
4419 +
4420 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
4421 +
4422 + ret = qca8k_set_page(priv, page);
4423 + if (ret)
4424 + goto exit;
4425 +
4426 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
4427 +
4428 + ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
4429 + QCA8K_MDIO_MASTER_BUSY);
4430 +
4431 +exit:
4432 +	/* even if the busy_wait times out, try to clear the MASTER_EN */
4433 + qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
4434 +
4435 + mutex_unlock(&bus->mdio_lock);
4436 +
4437 + return ret;
4438 +}
4439 +
4440 +static int
4441 +qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
4442 +{
4443 + struct mii_bus *bus = priv->bus;
4444 + u16 r1, r2, page;
4445 + u32 val;
4446 + int ret;
4447 +
4448 + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
4449 + return -EINVAL;
4450 +
4451 + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
4452 + QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
4453 + QCA8K_MDIO_MASTER_REG_ADDR(regnum);
4454 +
4455 + qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
4456 +
4457 + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
4458 +
4459 + ret = qca8k_set_page(priv, page);
4460 + if (ret)
4461 + goto exit;
4462 +
4463 + qca8k_mii_write32(priv, 0x10 | r2, r1, val);
4464 +
4465 + ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
4466 + QCA8K_MDIO_MASTER_BUSY);
4467 + if (ret)
4468 + goto exit;
4469 +
4470 + ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
4471 +
4472 +exit:
4473 +	/* even if the busy_wait times out, try to clear the MASTER_EN */
4474 + qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
4475 +
4476 + mutex_unlock(&bus->mdio_lock);
4477 +
4478 + if (ret >= 0)
4479 + ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
4480 +
4481 + return ret;
4482 +}
4483 +
4484 +static int
4485 +qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
4486 +{
4487 + struct qca8k_priv *priv = slave_bus->priv;
4488 + int ret;
4489 +
4490 +	/* Use mdio Ethernet when available, fall back to the legacy one on error */
4491 + ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
4492 + if (!ret)
4493 + return 0;
4494 +
4495 + return qca8k_mdio_write(priv, phy, regnum, data);
4496 +}
4497 +
4498 +static int
4499 +qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
4500 +{
4501 + struct qca8k_priv *priv = slave_bus->priv;
4502 + int ret;
4503 +
4504 +	/* Use mdio Ethernet when available, fall back to the legacy one on error */
4505 + ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
4506 + if (ret >= 0)
4507 + return ret;
4508 +
4509 + ret = qca8k_mdio_read(priv, phy, regnum);
4510 +
4511 + if (ret < 0)
4512 + return 0xffff;
4513 +
4514 + return ret;
4515 +}
4516 +
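+/* Legacy mapping: translate the switch port number to the internal PHY
+ * MDIO address (port 1 -> PHY 0, ..., port 5 -> PHY 4).
+ */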
4517 +static int
4518 +qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
4519 +{
4520 + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
4521 +
4522 + return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
4523 +}
4524 +
4525 +static int
4526 +qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
4527 +{
4528 + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
4529 +
4530 + return qca8k_internal_mdio_read(slave_bus, port, regnum);
4531 +}
4532 +
4533 +static int
4534 +qca8k_mdio_register(struct qca8k_priv *priv)
4535 +{
4536 + struct dsa_switch *ds = priv->ds;
4537 + struct device_node *mdio;
4538 + struct mii_bus *bus;
4539 +
4540 + bus = devm_mdiobus_alloc(ds->dev);
4541 + if (!bus)
4542 + return -ENOMEM;
4543 +
4544 + bus->priv = (void *)priv;
4545 + snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
4546 + ds->dst->index, ds->index);
4547 + bus->parent = ds->dev;
4548 + bus->phy_mask = ~ds->phys_mii_mask;
4549 + ds->slave_mii_bus = bus;
4550 +
4551 +	/* Check if the devicetree declares the port:phy mapping */
4552 + mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
4553 + if (of_device_is_available(mdio)) {
4554 + bus->name = "qca8k slave mii";
4555 + bus->read = qca8k_internal_mdio_read;
4556 + bus->write = qca8k_internal_mdio_write;
4557 + return devm_of_mdiobus_register(priv->dev, bus, mdio);
4558 + }
4559 +
4560 +	/* If a mapping can't be found, the legacy mapping is used,
4561 +	 * based on the qca8k_port_to_phy function
4562 + */
4563 + bus->name = "qca8k-legacy slave mii";
4564 + bus->read = qca8k_legacy_mdio_read;
4565 + bus->write = qca8k_legacy_mdio_write;
4566 + return devm_mdiobus_register(priv->dev, bus);
4567 +}
4568 +
4569 +static int
4570 +qca8k_setup_mdio_bus(struct qca8k_priv *priv)
4571 +{
4572 + u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
4573 + struct device_node *ports, *port;
4574 + phy_interface_t mode;
4575 + int err;
4576 +
4577 + ports = of_get_child_by_name(priv->dev->of_node, "ports");
4578 + if (!ports)
4579 + ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
4580 +
4581 + if (!ports)
4582 + return -EINVAL;
4583 +
4584 + for_each_available_child_of_node(ports, port) {
4585 + err = of_property_read_u32(port, "reg", &reg);
4586 + if (err) {
4587 + of_node_put(port);
4588 + of_node_put(ports);
4589 + return err;
4590 + }
4591 +
4592 + if (!dsa_is_user_port(priv->ds, reg))
4593 + continue;
4594 +
4595 + of_get_phy_mode(port, &mode);
4596 +
4597 + if (of_property_read_bool(port, "phy-handle") &&
4598 + mode != PHY_INTERFACE_MODE_INTERNAL)
4599 + external_mdio_mask |= BIT(reg);
4600 + else
4601 + internal_mdio_mask |= BIT(reg);
4602 + }
4603 +
4604 + of_node_put(ports);
4605 + if (!external_mdio_mask && !internal_mdio_mask) {
4606 + dev_err(priv->dev, "no PHYs are defined.\n");
4607 + return -EINVAL;
4608 + }
4609 +
4610 + /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
4611 + * the MDIO_MASTER register also _disconnects_ the external MDC
4612 + * passthrough to the internal PHYs. It's not possible to use both
4613 + * configurations at the same time!
4614 + *
4615 + * Because this came up during the review process:
4616 +	 * If the external mdio-bus driver is capable of magically disabling
4617 + * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
4618 + * accessors for the time being, it would be possible to pull this
4619 + * off.
4620 + */
4621 + if (!!external_mdio_mask && !!internal_mdio_mask) {
4622 + dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
4623 + return -EINVAL;
4624 + }
4625 +
4626 + if (external_mdio_mask) {
4627 +		/* Make sure to disable the internal mdio bus in case
4628 + * a dt-overlay and driver reload changed the configuration
4629 + */
4630 +
4631 + return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
4632 + QCA8K_MDIO_MASTER_EN);
4633 + }
4634 +
4635 + return qca8k_mdio_register(priv);
4636 +}
4637 +
4638 +static int
4639 +qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
4640 +{
4641 + u32 mask = 0;
4642 + int ret = 0;
4643 +
4644 + /* SoC specific settings for ipq8064.
4645 +	 * If more devices require this, consider adding
4646 + * a dedicated binding.
4647 + */
4648 + if (of_machine_is_compatible("qcom,ipq8064"))
4649 + mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
4650 +
4651 + /* SoC specific settings for ipq8065 */
4652 + if (of_machine_is_compatible("qcom,ipq8065"))
4653 + mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
4654 +
4655 + if (mask) {
4656 + ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
4657 + QCA8K_MAC_PWR_RGMII0_1_8V |
4658 + QCA8K_MAC_PWR_RGMII1_1_8V,
4659 + mask);
4660 + }
4661 +
4662 + return ret;
4663 +}
4664 +
4665 +static int qca8k_find_cpu_port(struct dsa_switch *ds)
4666 +{
4667 + struct qca8k_priv *priv = ds->priv;
4668 +
4669 +	/* Find the connected cpu port. Valid ports are 0 or 6 */
4670 + if (dsa_is_cpu_port(ds, 0))
4671 + return 0;
4672 +
4673 + dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
4674 +
4675 + if (dsa_is_cpu_port(ds, 6))
4676 + return 6;
4677 +
4678 + return -EINVAL;
4679 +}
4680 +
4681 +static int
4682 +qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
4683 +{
4684 + const struct qca8k_match_data *data = priv->info;
4685 + struct device_node *node = priv->dev->of_node;
4686 + u32 val = 0;
4687 + int ret;
4688 +
4689 +	/* The QCA8327 requires the correct package mode to be set.
4690 +	 * Its bigger brother, the QCA8328, has the 172 pin layout.
4691 +	 * This should be applied by default, but set it just to make sure.
4692 + */
4693 + if (priv->switch_id == QCA8K_ID_QCA8327) {
4694 + /* Set the correct package of 148 pin for QCA8327 */
4695 + if (data->reduced_package)
4696 + val |= QCA8327_PWS_PACKAGE148_EN;
4697 +
4698 + ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
4699 + val);
4700 + if (ret)
4701 + return ret;
4702 + }
4703 +
4704 + if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
4705 + val |= QCA8K_PWS_POWER_ON_SEL;
4706 +
4707 + if (of_property_read_bool(node, "qca,led-open-drain")) {
4708 + if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
4709 +			dev_err(priv->dev, "qca,led-open-drain requires qca,ignore-power-on-sel to be set.");
4710 + return -EINVAL;
4711 + }
4712 +
4713 + val |= QCA8K_PWS_LED_OPEN_EN_CSR;
4714 + }
4715 +
4716 + return qca8k_rmw(priv, QCA8K_REG_PWS,
4717 + QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
4718 + val);
4719 +}
4720 +
4721 +static int
4722 +qca8k_parse_port_config(struct qca8k_priv *priv)
4723 +{
4724 + int port, cpu_port_index = -1, ret;
4725 + struct device_node *port_dn;
4726 + phy_interface_t mode;
4727 + struct dsa_port *dp;
4728 + u32 delay;
4729 +
4730 +	/* We have 2 CPU ports. Check them */
4731 + for (port = 0; port < QCA8K_NUM_PORTS; port++) {
4732 +		/* Skip all other ports */
4733 + if (port != 0 && port != 6)
4734 + continue;
4735 +
4736 + dp = dsa_to_port(priv->ds, port);
4737 + port_dn = dp->dn;
4738 + cpu_port_index++;
4739 +
4740 + if (!of_device_is_available(port_dn))
4741 + continue;
4742 +
4743 + ret = of_get_phy_mode(port_dn, &mode);
4744 + if (ret)
4745 + continue;
4746 +
4747 + switch (mode) {
4748 + case PHY_INTERFACE_MODE_RGMII:
4749 + case PHY_INTERFACE_MODE_RGMII_ID:
4750 + case PHY_INTERFACE_MODE_RGMII_TXID:
4751 + case PHY_INTERFACE_MODE_RGMII_RXID:
4752 + case PHY_INTERFACE_MODE_SGMII:
4753 + delay = 0;
4754 +
4755 + if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
4756 + /* Switch regs accept value in ns, convert ps to ns */
4757 + delay = delay / 1000;
4758 + else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
4759 + mode == PHY_INTERFACE_MODE_RGMII_TXID)
4760 + delay = 1;
4761 +
4762 + if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
4763 + dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
4764 + delay = 3;
4765 + }
4766 +
4767 + priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
4768 +
4769 + delay = 0;
4770 +
4771 + if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
4772 + /* Switch regs accept value in ns, convert ps to ns */
4773 + delay = delay / 1000;
4774 + else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
4775 + mode == PHY_INTERFACE_MODE_RGMII_RXID)
4776 + delay = 2;
4777 +
4778 + if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
4779 + dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
4780 + delay = 3;
4781 + }
4782 +
4783 + priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
4784 +
4785 + /* Skip sgmii parsing for rgmii* mode */
4786 + if (mode == PHY_INTERFACE_MODE_RGMII ||
4787 + mode == PHY_INTERFACE_MODE_RGMII_ID ||
4788 + mode == PHY_INTERFACE_MODE_RGMII_TXID ||
4789 + mode == PHY_INTERFACE_MODE_RGMII_RXID)
4790 + break;
4791 +
4792 + if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
4793 + priv->ports_config.sgmii_tx_clk_falling_edge = true;
4794 +
4795 + if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
4796 + priv->ports_config.sgmii_rx_clk_falling_edge = true;
4797 +
4798 + if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
4799 + priv->ports_config.sgmii_enable_pll = true;
4800 +
4801 + if (priv->switch_id == QCA8K_ID_QCA8327) {
4802 + dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
4803 + priv->ports_config.sgmii_enable_pll = false;
4804 + }
4805 +
4806 + if (priv->switch_revision < 2)
4807 + dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
4808 + }
4809 +
4810 + break;
4811 + default:
4812 + continue;
4813 + }
4814 + }
4815 +
4816 + return 0;
4817 +}
4818 +
4819 +static int
4820 +qca8k_setup(struct dsa_switch *ds)
4821 +{
4822 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
4823 + int cpu_port, ret, i;
4824 + u32 mask;
4825 +
4826 + cpu_port = qca8k_find_cpu_port(ds);
4827 + if (cpu_port < 0) {
4828 +		dev_err(priv->dev, "No cpu port configured on either cpu port0 or port6");
4829 + return cpu_port;
4830 + }
4831 +
4832 +	/* Parse CPU port config to be later used in phylink mac_config */
4833 + ret = qca8k_parse_port_config(priv);
4834 + if (ret)
4835 + return ret;
4836 +
4837 + ret = qca8k_setup_mdio_bus(priv);
4838 + if (ret)
4839 + return ret;
4840 +
4841 + ret = qca8k_setup_of_pws_reg(priv);
4842 + if (ret)
4843 + return ret;
4844 +
4845 + ret = qca8k_setup_mac_pwr_sel(priv);
4846 + if (ret)
4847 + return ret;
4848 +
4849 + /* Make sure MAC06 is disabled */
4850 + ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
4851 + QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
4852 + if (ret) {
4853 + dev_err(priv->dev, "failed disabling MAC06 exchange");
4854 + return ret;
4855 + }
4856 +
4857 + /* Enable CPU Port */
4858 + ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
4859 + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
4860 + if (ret) {
4861 + dev_err(priv->dev, "failed enabling CPU port");
4862 + return ret;
4863 + }
4864 +
4865 + /* Enable MIB counters */
4866 + ret = qca8k_mib_init(priv);
4867 + if (ret)
4868 + dev_warn(priv->dev, "mib init failed");
4869 +
4870 + /* Initial setup of all ports */
4871 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
4872 + /* Disable forwarding by default on all ports */
4873 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
4874 + QCA8K_PORT_LOOKUP_MEMBER, 0);
4875 + if (ret)
4876 + return ret;
4877 +
4878 + /* Enable QCA header mode on all cpu ports */
4879 + if (dsa_is_cpu_port(ds, i)) {
4880 + ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
4881 + FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
4882 + FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
4883 + if (ret) {
4884 + dev_err(priv->dev, "failed enabling QCA header mode");
4885 + return ret;
4886 + }
4887 + }
4888 +
4889 + /* Disable MAC by default on all user ports */
4890 + if (dsa_is_user_port(ds, i))
4891 + qca8k_port_set_status(priv, i, 0);
4892 + }
4893 +
4894 + /* Forward all unknown frames to CPU port for Linux processing
4895 + * Notice that in multi-cpu config only one port should be set
4896 +	 * for igmp, unknown, multicast and broadcast packets
4897 + */
4898 + ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
4899 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
4900 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
4901 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
4902 + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
4903 + if (ret)
4904 + return ret;
4905 +
4906 + /* Setup connection between CPU port & user ports
4907 + * Configure specific switch configuration for ports
4908 + */
4909 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
4910 + /* CPU port gets connected to all user ports of the switch */
4911 + if (dsa_is_cpu_port(ds, i)) {
4912 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
4913 + QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
4914 + if (ret)
4915 + return ret;
4916 + }
4917 +
4918 + /* Individual user ports get connected to CPU port only */
4919 + if (dsa_is_user_port(ds, i)) {
4920 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
4921 + QCA8K_PORT_LOOKUP_MEMBER,
4922 + BIT(cpu_port));
4923 + if (ret)
4924 + return ret;
4925 +
4926 + /* Enable ARP Auto-learning by default */
4927 + ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
4928 + QCA8K_PORT_LOOKUP_LEARN);
4929 + if (ret)
4930 + return ret;
4931 +
4932 + /* For port based vlans to work we need to set the
4933 + * default egress vid
4934 + */
4935 + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
4936 + QCA8K_EGREES_VLAN_PORT_MASK(i),
4937 + QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
4938 + if (ret)
4939 + return ret;
4940 +
4941 + ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
4942 + QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
4943 + QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
4944 + if (ret)
4945 + return ret;
4946 + }
4947 +
4948 +		/* Port 5 of the qca8337 has some problems under flood conditions. The
4949 +		 * original legacy driver had some specific buffer and priority settings
4950 +		 * for the different ports suggested by the QCA switch team. Add these
4951 +		 * missing settings to improve switch stability under load conditions.
4952 +		 * This problem is limited to the qca8337; other qca8k switches are not affected.
4953 + */
4954 + if (priv->switch_id == QCA8K_ID_QCA8337) {
4955 + switch (i) {
4956 +			/* The 2 CPU ports and port 5 require different
4957 +			 * priorities than any other port.
4958 + */
4959 + case 0:
4960 + case 5:
4961 + case 6:
4962 + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
4963 + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
4964 + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
4965 + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
4966 + QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
4967 + QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
4968 + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
4969 + break;
4970 + default:
4971 + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
4972 + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
4973 + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
4974 + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
4975 + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
4976 + }
4977 + qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
4978 +
4979 + mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
4980 + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
4981 + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
4982 + QCA8K_PORT_HOL_CTRL1_WRED_EN;
4983 + qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
4984 + QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
4985 + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
4986 + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
4987 + QCA8K_PORT_HOL_CTRL1_WRED_EN,
4988 + mask);
4989 + }
4990 + }
4991 +
4992 +	/* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
4993 + if (priv->switch_id == QCA8K_ID_QCA8327) {
4994 + mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
4995 + QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
4996 + qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
4997 + QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
4998 + QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
4999 + mask);
5000 + }
5001 +
5002 + /* Setup our port MTUs to match power on defaults */
5003 + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
5004 + if (ret)
5005 +		dev_warn(priv->dev, "failed setting MTU");
5006 +
5007 + /* Flush the FDB table */
5008 + qca8k_fdb_flush(priv);
5009 +
5010 + /* We don't have interrupts for link changes, so we need to poll */
5011 + ds->pcs_poll = true;
5012 +
5013 +	/* Set min and max ageing values supported */
5014 + ds->ageing_time_min = 7000;
5015 + ds->ageing_time_max = 458745000;
5016 +
5017 + /* Set max number of LAGs supported */
5018 + ds->num_lag_ids = QCA8K_NUM_LAGS;
5019 +
5020 + return 0;
5021 +}
5022 +
5023 +static void
5024 +qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
5025 + u32 reg)
5026 +{
5027 + u32 delay, val = 0;
5028 + int ret;
5029 +
5030 +	/* Delay can be declared in 3 different ways:
5031 +	 * rgmii mode with the standard internal-delay bindings, or the
5032 +	 * rgmii-id or rgmii-txid/rxid phy modes.
5033 +	 * The parse logic sets a delay different from 0 only when one
5034 +	 * of these 3 ways is used. In all other cases the delay is not
5035 +	 * enabled. With ID or TX/RXID the delay is enabled and set
5036 +	 * to the default and recommended value.
5037 + */
5038 + if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
5039 + delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
5040 +
5041 + val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
5042 + QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
5043 + }
5044 +
5045 + if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
5046 + delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
5047 +
5048 + val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
5049 + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
5050 + }
5051 +
5052 + /* Set RGMII delay based on the selected values */
5053 + ret = qca8k_rmw(priv, reg,
5054 + QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
5055 + QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
5056 + QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
5057 + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
5058 + val);
5059 + if (ret)
5060 + dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
5061 + cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
5062 +}
5063 +
5064 +static void
5065 +qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
5066 + const struct phylink_link_state *state)
5067 +{
5068 + struct qca8k_priv *priv = ds->priv;
5069 + int cpu_port_index, ret;
5070 + u32 reg, val;
5071 +
5072 + switch (port) {
5073 + case 0: /* 1st CPU port */
5074 + if (state->interface != PHY_INTERFACE_MODE_RGMII &&
5075 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5076 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5077 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5078 + state->interface != PHY_INTERFACE_MODE_SGMII)
5079 + return;
5080 +
5081 + reg = QCA8K_REG_PORT0_PAD_CTRL;
5082 + cpu_port_index = QCA8K_CPU_PORT0;
5083 + break;
5084 + case 1:
5085 + case 2:
5086 + case 3:
5087 + case 4:
5088 + case 5:
5089 + /* Internal PHY, nothing to do */
5090 + return;
5091 + case 6: /* 2nd CPU port / external PHY */
5092 + if (state->interface != PHY_INTERFACE_MODE_RGMII &&
5093 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5094 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5095 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5096 + state->interface != PHY_INTERFACE_MODE_SGMII &&
5097 + state->interface != PHY_INTERFACE_MODE_1000BASEX)
5098 + return;
5099 +
5100 + reg = QCA8K_REG_PORT6_PAD_CTRL;
5101 + cpu_port_index = QCA8K_CPU_PORT6;
5102 + break;
5103 + default:
5104 + dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
5105 + return;
5106 + }
5107 +
5108 + if (port != 6 && phylink_autoneg_inband(mode)) {
5109 + dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
5110 + __func__);
5111 + return;
5112 + }
5113 +
5114 + switch (state->interface) {
5115 + case PHY_INTERFACE_MODE_RGMII:
5116 + case PHY_INTERFACE_MODE_RGMII_ID:
5117 + case PHY_INTERFACE_MODE_RGMII_TXID:
5118 + case PHY_INTERFACE_MODE_RGMII_RXID:
5119 + qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
5120 +
5121 + /* Configure rgmii delay */
5122 + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
5123 +
5124 +		/* QCA8337 requires the rgmii rx delay to be set for all ports.
5125 + * This is enabled through PORT5_PAD_CTRL for all ports,
5126 + * rather than individual port registers.
5127 + */
5128 + if (priv->switch_id == QCA8K_ID_QCA8337)
5129 + qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
5130 + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
5131 + break;
5132 + case PHY_INTERFACE_MODE_SGMII:
5133 + case PHY_INTERFACE_MODE_1000BASEX:
5134 + /* Enable SGMII on the port */
5135 + qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
5136 +
5137 + /* Enable/disable SerDes auto-negotiation as necessary */
5138 + ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
5139 + if (ret)
5140 + return;
5141 + if (phylink_autoneg_inband(mode))
5142 + val &= ~QCA8K_PWS_SERDES_AEN_DIS;
5143 + else
5144 + val |= QCA8K_PWS_SERDES_AEN_DIS;
5145 + qca8k_write(priv, QCA8K_REG_PWS, val);
5146 +
5147 + /* Configure the SGMII parameters */
5148 + ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
5149 + if (ret)
5150 + return;
5151 +
5152 + val |= QCA8K_SGMII_EN_SD;
5153 +
5154 + if (priv->ports_config.sgmii_enable_pll)
5155 + val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
5156 + QCA8K_SGMII_EN_TX;
5157 +
5158 + if (dsa_is_cpu_port(ds, port)) {
5159 + /* CPU port, we're talking to the CPU MAC, be a PHY */
5160 + val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
5161 + val |= QCA8K_SGMII_MODE_CTRL_PHY;
5162 + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5163 + val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
5164 + val |= QCA8K_SGMII_MODE_CTRL_MAC;
5165 + } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
5166 + val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
5167 + val |= QCA8K_SGMII_MODE_CTRL_BASEX;
5168 + }
5169 +
5170 + qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
5171 +
5172 +		/* The original code reports port instability, as SGMII also
5173 +		 * requires the delay to be set. Apply the advised values here or take them from DT.
5174 + */
5175 + if (state->interface == PHY_INTERFACE_MODE_SGMII)
5176 + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
5177 +
5178 +		/* For qca8327/qca8328/qca8334/qca8338 the sgmii block is unique and
5179 +		 * the falling edge is set by writing to the PORT0 PAD reg
5180 + */
5181 + if (priv->switch_id == QCA8K_ID_QCA8327 ||
5182 + priv->switch_id == QCA8K_ID_QCA8337)
5183 + reg = QCA8K_REG_PORT0_PAD_CTRL;
5184 +
5185 + val = 0;
5186 +
5187 + /* SGMII Clock phase configuration */
5188 + if (priv->ports_config.sgmii_rx_clk_falling_edge)
5189 + val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
5190 +
5191 + if (priv->ports_config.sgmii_tx_clk_falling_edge)
5192 + val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
5193 +
5194 + if (val)
5195 + ret = qca8k_rmw(priv, reg,
5196 + QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
5197 + QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
5198 + val);
5199 +
5200 + break;
5201 + default:
5202 + dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
5203 + phy_modes(state->interface), port);
5204 + return;
5205 + }
5206 +}
5207 +
5208 +static void
5209 +qca8k_phylink_validate(struct dsa_switch *ds, int port,
5210 + unsigned long *supported,
5211 + struct phylink_link_state *state)
5212 +{
5213 + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
5214 +
5215 + switch (port) {
5216 + case 0: /* 1st CPU port */
5217 + if (state->interface != PHY_INTERFACE_MODE_NA &&
5218 + state->interface != PHY_INTERFACE_MODE_RGMII &&
5219 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5220 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5221 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5222 + state->interface != PHY_INTERFACE_MODE_SGMII)
5223 + goto unsupported;
5224 + break;
5225 + case 1:
5226 + case 2:
5227 + case 3:
5228 + case 4:
5229 + case 5:
5230 + /* Internal PHY */
5231 + if (state->interface != PHY_INTERFACE_MODE_NA &&
5232 + state->interface != PHY_INTERFACE_MODE_GMII &&
5233 + state->interface != PHY_INTERFACE_MODE_INTERNAL)
5234 + goto unsupported;
5235 + break;
5236 + case 6: /* 2nd CPU port / external PHY */
5237 + if (state->interface != PHY_INTERFACE_MODE_NA &&
5238 + state->interface != PHY_INTERFACE_MODE_RGMII &&
5239 + state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
5240 + state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
5241 + state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
5242 + state->interface != PHY_INTERFACE_MODE_SGMII &&
5243 + state->interface != PHY_INTERFACE_MODE_1000BASEX)
5244 + goto unsupported;
5245 + break;
5246 + default:
5247 +unsupported:
5248 + linkmode_zero(supported);
5249 + return;
5250 + }
5251 +
5252 + phylink_set_port_modes(mask);
5253 + phylink_set(mask, Autoneg);
5254 +
5255 + phylink_set(mask, 1000baseT_Full);
5256 + phylink_set(mask, 10baseT_Half);
5257 + phylink_set(mask, 10baseT_Full);
5258 + phylink_set(mask, 100baseT_Half);
5259 + phylink_set(mask, 100baseT_Full);
5260 +
5261 + if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
5262 + phylink_set(mask, 1000baseX_Full);
5263 +
5264 + phylink_set(mask, Pause);
5265 + phylink_set(mask, Asym_Pause);
5266 +
5267 + linkmode_and(supported, supported, mask);
5268 + linkmode_and(state->advertising, state->advertising, mask);
5269 +}
5270 +
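+/* Report link, autoneg, speed, duplex and pause state read back from the
+ * PORT_STATUS register.
+ */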
5271 +static int
5272 +qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
5273 + struct phylink_link_state *state)
5274 +{
5275 + struct qca8k_priv *priv = ds->priv;
5276 + u32 reg;
5277 + int ret;
5278 +
5279 + ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
5280 + if (ret < 0)
5281 + return ret;
5282 +
5283 + state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
5284 + state->an_complete = state->link;
5285 + state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
5286 + state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
5287 + DUPLEX_HALF;
5288 +
5289 + switch (reg & QCA8K_PORT_STATUS_SPEED) {
5290 + case QCA8K_PORT_STATUS_SPEED_10:
5291 + state->speed = SPEED_10;
5292 + break;
5293 + case QCA8K_PORT_STATUS_SPEED_100:
5294 + state->speed = SPEED_100;
5295 + break;
5296 + case QCA8K_PORT_STATUS_SPEED_1000:
5297 + state->speed = SPEED_1000;
5298 + break;
5299 + default:
5300 + state->speed = SPEED_UNKNOWN;
5301 + break;
5302 + }
5303 +
5304 + state->pause = MLO_PAUSE_NONE;
5305 + if (reg & QCA8K_PORT_STATUS_RXFLOW)
5306 + state->pause |= MLO_PAUSE_RX;
5307 + if (reg & QCA8K_PORT_STATUS_TXFLOW)
5308 + state->pause |= MLO_PAUSE_TX;
5309 +
5310 + return 1;
5311 +}
5312 +
5313 +static void
5314 +qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
5315 + phy_interface_t interface)
5316 +{
5317 + struct qca8k_priv *priv = ds->priv;
5318 +
5319 + qca8k_port_set_status(priv, port, 0);
5320 +}
5321 +
5322 +static void
5323 +qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
5324 + phy_interface_t interface, struct phy_device *phydev,
5325 + int speed, int duplex, bool tx_pause, bool rx_pause)
5326 +{
5327 + struct qca8k_priv *priv = ds->priv;
5328 + u32 reg;
5329 +
5330 + if (phylink_autoneg_inband(mode)) {
5331 + reg = QCA8K_PORT_STATUS_LINK_AUTO;
5332 + } else {
5333 + switch (speed) {
5334 + case SPEED_10:
5335 + reg = QCA8K_PORT_STATUS_SPEED_10;
5336 + break;
5337 + case SPEED_100:
5338 + reg = QCA8K_PORT_STATUS_SPEED_100;
5339 + break;
5340 + case SPEED_1000:
5341 + reg = QCA8K_PORT_STATUS_SPEED_1000;
5342 + break;
5343 + default:
5344 + reg = QCA8K_PORT_STATUS_LINK_AUTO;
5345 + break;
5346 + }
5347 +
5348 + if (duplex == DUPLEX_FULL)
5349 + reg |= QCA8K_PORT_STATUS_DUPLEX;
5350 +
5351 + if (rx_pause || dsa_is_cpu_port(ds, port))
5352 + reg |= QCA8K_PORT_STATUS_RXFLOW;
5353 +
5354 + if (tx_pause || dsa_is_cpu_port(ds, port))
5355 + reg |= QCA8K_PORT_STATUS_TXFLOW;
5356 + }
5357 +
5358 + reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
5359 +
5360 + qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
5361 +}
5362 +
5363 +static void
5364 +qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
5365 +{
5366 + struct qca8k_priv *priv = ds->priv;
5367 + int i;
5368 +
5369 + if (stringset != ETH_SS_STATS)
5370 + return;
5371 +
5372 + for (i = 0; i < priv->info->mib_count; i++)
5373 + strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
5374 + ETH_GSTRING_LEN);
5375 +}
5376 +
5377 +static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
5378 +{
5379 + struct qca8k_mib_eth_data *mib_eth_data;
5380 + struct qca8k_priv *priv = ds->priv;
5381 + const struct qca8k_mib_desc *mib;
5382 + struct mib_ethhdr *mib_ethhdr;
5383 + int i, mib_len, offset = 0;
5384 + u64 *data;
5385 + u8 port;
5386 +
5387 + mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
5388 + mib_eth_data = &priv->mib_eth_data;
5389 +
5390 +	/* The switch autocasts MIB data for every port. Ignore packets for
5391 +	 * other ports and parse only the requested one.
5392 + */
5393 + port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
5394 + if (port != mib_eth_data->req_port)
5395 + goto exit;
5396 +
5397 + data = mib_eth_data->data;
5398 +
5399 + for (i = 0; i < priv->info->mib_count; i++) {
5400 + mib = &ar8327_mib[i];
5401 +
5402 +		/* The first 3 mib counters are present in the skb head */
5403 + if (i < 3) {
5404 + data[i] = mib_ethhdr->data[i];
5405 + continue;
5406 + }
5407 +
5408 + mib_len = sizeof(uint32_t);
5409 +
5410 +		/* Some mib counters are 64 bit wide */
5411 + if (mib->size == 2)
5412 + mib_len = sizeof(uint64_t);
5413 +
5414 +		/* Copy the mib value from the packet to the data buffer */
5415 + memcpy(data + i, skb->data + offset, mib_len);
5416 +
5417 + /* Set the offset for the next mib */
5418 + offset += mib_len;
5419 + }
5420 +
5421 +exit:
5422 +	/* Complete on receiving all the mib packets */
5423 + if (refcount_dec_and_test(&mib_eth_data->port_parsed))
5424 + complete(&mib_eth_data->rw_done);
5425 +}
5426 +
5427 +static int
5428 +qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
5429 +{
5430 + struct dsa_port *dp = dsa_to_port(ds, port);
5431 + struct qca8k_mib_eth_data *mib_eth_data;
5432 + struct qca8k_priv *priv = ds->priv;
5433 + int ret;
5434 +
5435 + mib_eth_data = &priv->mib_eth_data;
5436 +
5437 + mutex_lock(&mib_eth_data->mutex);
5438 +
5439 + reinit_completion(&mib_eth_data->rw_done);
5440 +
5441 + mib_eth_data->req_port = dp->index;
5442 + mib_eth_data->data = data;
5443 + refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
5444 +
5445 + mutex_lock(&priv->reg_mutex);
5446 +
5447 + /* Send mib autocast request */
5448 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
5449 + QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
5450 + FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
5451 + QCA8K_MIB_BUSY);
5452 +
5453 + mutex_unlock(&priv->reg_mutex);
5454 +
5455 + if (ret)
5456 + goto exit;
5457 +
5458 + ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
5459 +
5460 +exit:
5461 + mutex_unlock(&mib_eth_data->mutex);
5462 +
5463 + return ret;
5464 +}
5465 +
5466 +static void
5467 +qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
5468 + uint64_t *data)
5469 +{
5470 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5471 + const struct qca8k_mib_desc *mib;
5472 + u32 reg, i, val;
5473 + u32 hi = 0;
5474 + int ret;
5475 +
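+	/* Use the MIB autocast Ethernet path when available, otherwise read
+	 * the counters one by one through the regmap.
+	 */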
5476 + if (priv->mgmt_master && priv->info->ops->autocast_mib &&
5477 + priv->info->ops->autocast_mib(ds, port, data) > 0)
5478 + return;
5479 +
5480 + for (i = 0; i < priv->info->mib_count; i++) {
5481 + mib = &ar8327_mib[i];
5482 + reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
5483 +
5484 + ret = qca8k_read(priv, reg, &val);
5485 + if (ret < 0)
5486 + continue;
5487 +
5488 + if (mib->size == 2) {
5489 + ret = qca8k_read(priv, reg + 4, &hi);
5490 + if (ret < 0)
5491 + continue;
5492 + }
5493 +
5494 + data[i] = val;
5495 + if (mib->size == 2)
5496 + data[i] |= (u64)hi << 32;
5497 + }
5498 +}
5499 +
5500 +static int
5501 +qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
5502 +{
5503 + struct qca8k_priv *priv = ds->priv;
5504 +
5505 + if (sset != ETH_SS_STATS)
5506 + return 0;
5507 +
5508 + return priv->info->mib_count;
5509 +}
5510 +
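+/* Toggle the per-port LPI enable bit in the global EEE control register */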
5511 +static int
5512 +qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
5513 +{
5514 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5515 + u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
5516 + u32 reg;
5517 + int ret;
5518 +
5519 + mutex_lock(&priv->reg_mutex);
5520 + ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
5521 + if (ret < 0)
5522 + goto exit;
5523 +
5524 + if (eee->eee_enabled)
5525 + reg |= lpi_en;
5526 + else
5527 + reg &= ~lpi_en;
5528 + ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
5529 +
5530 +exit:
5531 + mutex_unlock(&priv->reg_mutex);
5532 + return ret;
5533 +}
5534 +
5535 +static int
5536 +qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
5537 +{
5538 + /* Nothing to do on the port's MAC */
5539 + return 0;
5540 +}
5541 +
5542 +static void
5543 +qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
5544 +{
5545 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5546 + u32 stp_state;
5547 +
5548 + switch (state) {
5549 + case BR_STATE_DISABLED:
5550 + stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
5551 + break;
5552 + case BR_STATE_BLOCKING:
5553 + stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
5554 + break;
5555 + case BR_STATE_LISTENING:
5556 + stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
5557 + break;
5558 + case BR_STATE_LEARNING:
5559 + stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
5560 + break;
5561 + case BR_STATE_FORWARDING:
5562 + default:
5563 + stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
5564 + break;
5565 + }
5566 +
5567 + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5568 + QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
5569 +}
5570 +
5571 +static int
5572 +qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
5573 +{
5574 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5575 + int port_mask, cpu_port;
5576 + int i, ret;
5577 +
5578 + cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
5579 + port_mask = BIT(cpu_port);
5580 +
5581 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
5582 + if (dsa_is_cpu_port(ds, i))
5583 + continue;
5584 + if (dsa_to_port(ds, i)->bridge_dev != br)
5585 + continue;
5586 + /* Add this port to the portvlan mask of the other ports
5587 + * in the bridge
5588 + */
5589 + ret = regmap_set_bits(priv->regmap,
5590 + QCA8K_PORT_LOOKUP_CTRL(i),
5591 + BIT(port));
5592 + if (ret)
5593 + return ret;
5594 + if (i != port)
5595 + port_mask |= BIT(i);
5596 + }
5597 +
5598 + /* Add all other ports to this ports portvlan mask */
5599 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5600 + QCA8K_PORT_LOOKUP_MEMBER, port_mask);
5601 +
5602 + return ret;
5603 +}
5604 +
5605 +static void
5606 +qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
5607 +{
5608 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5609 + int cpu_port, i;
5610 +
5611 + cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
5612 +
5613 + for (i = 0; i < QCA8K_NUM_PORTS; i++) {
5614 + if (dsa_is_cpu_port(ds, i))
5615 + continue;
5616 + if (dsa_to_port(ds, i)->bridge_dev != br)
5617 + continue;
5618 +		/* Remove this port from the portvlan mask of the other ports
5619 + * in the bridge
5620 + */
5621 + regmap_clear_bits(priv->regmap,
5622 + QCA8K_PORT_LOOKUP_CTRL(i),
5623 + BIT(port));
5624 + }
5625 +
5626 + /* Set the cpu port to be the only one in the portvlan mask of
5627 + * this port
5628 + */
5629 + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5630 + QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
5631 +}
5632 +
5633 +static void
5634 +qca8k_port_fast_age(struct dsa_switch *ds, int port)
5635 +{
5636 + struct qca8k_priv *priv = ds->priv;
5637 +
5638 + mutex_lock(&priv->reg_mutex);
5639 + qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
5640 + mutex_unlock(&priv->reg_mutex);
5641 +}
5642 +
5643 +static int
5644 +qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
5645 +{
5646 + struct qca8k_priv *priv = ds->priv;
5647 + unsigned int secs = msecs / 1000;
5648 + u32 val;
5649 +
5650 +	/* AGE_TIME reg is set in 7s steps */
5651 + val = secs / 7;
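+	/* e.g. the bridge default of 300 s becomes 300 / 7 = 42 units */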
5652 +
5653 +	/* Handle the case where val is 0 so that
5654 +	 * learning is NOT disabled
5655 +	 */
5656 + if (!val)
5657 + val = 1;
5658 +
5659 + return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
5660 + QCA8K_ATU_AGE_TIME(val));
5661 +}
5662 +
5663 +static int
5664 +qca8k_port_enable(struct dsa_switch *ds, int port,
5665 + struct phy_device *phy)
5666 +{
5667 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5668 +
5669 + qca8k_port_set_status(priv, port, 1);
5670 + priv->port_enabled_map |= BIT(port);
5671 +
5672 + if (dsa_is_user_port(ds, port))
5673 + phy_support_asym_pause(phy);
5674 +
5675 + return 0;
5676 +}
5677 +
5678 +static void
5679 +qca8k_port_disable(struct dsa_switch *ds, int port)
5680 +{
5681 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5682 +
5683 + qca8k_port_set_status(priv, port, 0);
5684 + priv->port_enabled_map &= ~BIT(port);
5685 +}
5686 +
5687 +static int
5688 +qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
5689 +{
5690 + struct qca8k_priv *priv = ds->priv;
5691 + int ret;
5692 +
5693 +	/* We only have a general MTU setting.
5694 +	 * DSA always sets the CPU port's MTU to the largest MTU of the slave
5695 + * ports.
5696 + * Setting MTU just for the CPU port is sufficient to correctly set a
5697 + * value for every port.
5698 + */
5699 + if (!dsa_is_cpu_port(ds, port))
5700 + return 0;
5701 +
5702 + /* To change the MAX_FRAME_SIZE the cpu ports must be off or
5703 + * the switch panics.
5704 + * Turn off both cpu ports before applying the new value to prevent
5705 + * this.
5706 + */
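+	/* Ports 0 and 6 are the two CPU-capable ports on this switch family */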
5707 + if (priv->port_enabled_map & BIT(0))
5708 + qca8k_port_set_status(priv, 0, 0);
5709 +
5710 + if (priv->port_enabled_map & BIT(6))
5711 + qca8k_port_set_status(priv, 6, 0);
5712 +
5713 + /* Include L2 header / FCS length */
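+	/* (a standard 1500 byte MTU programs 1500 + 14 + 4 = 1518) */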
5714 + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
5715 +
5716 + if (priv->port_enabled_map & BIT(0))
5717 + qca8k_port_set_status(priv, 0, 1);
5718 +
5719 + if (priv->port_enabled_map & BIT(6))
5720 + qca8k_port_set_status(priv, 6, 1);
5721 +
5722 + return ret;
5723 +}
5724 +
5725 +static int
5726 +qca8k_port_max_mtu(struct dsa_switch *ds, int port)
5727 +{
5728 + return QCA8K_MAX_MTU;
5729 +}
5730 +
5731 +static int
5732 +qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
5733 + u16 port_mask, u16 vid)
5734 +{
5735 + /* Set the vid to the port vlan id if no vid is set */
5736 + if (!vid)
5737 + vid = QCA8K_PORT_VID_DEF;
5738 +
5739 + return qca8k_fdb_add(priv, addr, port_mask, vid,
5740 + QCA8K_ATU_STATUS_STATIC);
5741 +}
5742 +
5743 +static int
5744 +qca8k_port_fdb_add(struct dsa_switch *ds, int port,
5745 + const unsigned char *addr, u16 vid)
5746 +{
5747 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5748 + u16 port_mask = BIT(port);
5749 +
5750 + return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
5751 +}
5752 +
5753 +static int
5754 +qca8k_port_fdb_del(struct dsa_switch *ds, int port,
5755 + const unsigned char *addr, u16 vid)
5756 +{
5757 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5758 + u16 port_mask = BIT(port);
5759 +
5760 + if (!vid)
5761 + vid = QCA8K_PORT_VID_DEF;
5762 +
5763 + return qca8k_fdb_del(priv, addr, port_mask, vid);
5764 +}
5765 +
5766 +static int
5767 +qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
5768 + dsa_fdb_dump_cb_t *cb, void *data)
5769 +{
5770 + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
5771 + struct qca8k_fdb _fdb = { 0 };
5772 + int cnt = QCA8K_NUM_FDB_RECORDS;
5773 + bool is_static;
5774 + int ret = 0;
5775 +
5776 + mutex_lock(&priv->reg_mutex);
5777 + while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
5778 + if (!_fdb.aging)
5779 + break;
5780 + is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
5781 + ret = cb(_fdb.mac, _fdb.vid, is_static, data);
5782 + if (ret)
5783 + break;
5784 + }
5785 + mutex_unlock(&priv->reg_mutex);
5786 +
5787 + return 0;
5788 +}
5789 +
5790 +static int
5791 +qca8k_port_mdb_add(struct dsa_switch *ds, int port,
5792 + const struct switchdev_obj_port_mdb *mdb)
5793 +{
5794 + struct qca8k_priv *priv = ds->priv;
5795 + const u8 *addr = mdb->addr;
5796 + u16 vid = mdb->vid;
5797 +
5798 + return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
5799 +}
5800 +
5801 +static int
5802 +qca8k_port_mdb_del(struct dsa_switch *ds, int port,
5803 + const struct switchdev_obj_port_mdb *mdb)
5804 +{
5805 + struct qca8k_priv *priv = ds->priv;
5806 + const u8 *addr = mdb->addr;
5807 + u16 vid = mdb->vid;
5808 +
5809 + return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
5810 +}
5811 +
5812 +static int
5813 +qca8k_port_mirror_add(struct dsa_switch *ds, int port,
5814 + struct dsa_mall_mirror_tc_entry *mirror,
5815 + bool ingress)
5816 +{
5817 + struct qca8k_priv *priv = ds->priv;
5818 + int monitor_port, ret;
5819 + u32 reg, val;
5820 +
5821 +	/* Check for an existing entry */
5822 + if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
5823 + return -EEXIST;
5824 +
5825 + ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
5826 + if (ret)
5827 + return ret;
5828 +
5829 + /* QCA83xx can have only one port set to mirror mode.
5830 + * Check that the correct port is requested and return error otherwise.
5831 +	 * When no mirror port is set, the value is set to 0xF
5832 + */
5833 + monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
5834 + if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
5835 + return -EEXIST;
5836 +
5837 + /* Set the monitor port */
5838 + val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
5839 + mirror->to_local_port);
5840 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
5841 + QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
5842 + if (ret)
5843 + return ret;
5844 +
5845 + if (ingress) {
5846 + reg = QCA8K_PORT_LOOKUP_CTRL(port);
5847 + val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
5848 + } else {
5849 + reg = QCA8K_REG_PORT_HOL_CTRL1(port);
5850 + val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
5851 + }
5852 +
5853 + ret = regmap_update_bits(priv->regmap, reg, val, val);
5854 + if (ret)
5855 + return ret;
5856 +
5857 + /* Track mirror port for tx and rx to decide when the
5858 + * mirror port has to be disabled.
5859 + */
5860 + if (ingress)
5861 + priv->mirror_rx |= BIT(port);
5862 + else
5863 + priv->mirror_tx |= BIT(port);
5864 +
5865 + return 0;
5866 +}
5867 +
5868 +static void
5869 +qca8k_port_mirror_del(struct dsa_switch *ds, int port,
5870 + struct dsa_mall_mirror_tc_entry *mirror)
5871 +{
5872 + struct qca8k_priv *priv = ds->priv;
5873 + u32 reg, val;
5874 + int ret;
5875 +
5876 + if (mirror->ingress) {
5877 + reg = QCA8K_PORT_LOOKUP_CTRL(port);
5878 + val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
5879 + } else {
5880 + reg = QCA8K_REG_PORT_HOL_CTRL1(port);
5881 + val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
5882 + }
5883 +
5884 + ret = regmap_clear_bits(priv->regmap, reg, val);
5885 + if (ret)
5886 + goto err;
5887 +
5888 + if (mirror->ingress)
5889 + priv->mirror_rx &= ~BIT(port);
5890 + else
5891 + priv->mirror_tx &= ~BIT(port);
5892 +
5893 +	/* No port is set to send packets to the mirror port. Disable the mirror port */
5894 + if (!priv->mirror_rx && !priv->mirror_tx) {
5895 + val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
5896 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
5897 + QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
5898 + if (ret)
5899 + goto err;
5900 + }
5901 +err:
5902 + dev_err(priv->dev, "Failed to del mirror port from %d", port);
5903 +}
5904 +
5905 +static int
5906 +qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
5907 + struct netlink_ext_ack *extack)
5908 +{
5909 + struct qca8k_priv *priv = ds->priv;
5910 + int ret;
5911 +
5912 + if (vlan_filtering) {
5913 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5914 + QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
5915 + QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
5916 + } else {
5917 + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
5918 + QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
5919 + QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
5920 + }
5921 +
5922 + return ret;
5923 +}
5924 +
5925 +static int
5926 +qca8k_port_vlan_add(struct dsa_switch *ds, int port,
5927 + const struct switchdev_obj_port_vlan *vlan,
5928 + struct netlink_ext_ack *extack)
5929 +{
5930 + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
5931 + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
5932 + struct qca8k_priv *priv = ds->priv;
5933 + int ret;
5934 +
5935 + ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
5936 + if (ret) {
5937 + dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
5938 + return ret;
5939 + }
5940 +
5941 + if (pvid) {
5942 + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
5943 + QCA8K_EGREES_VLAN_PORT_MASK(port),
5944 + QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
5945 + if (ret)
5946 + return ret;
5947 +
5948 + ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
5949 + QCA8K_PORT_VLAN_CVID(vlan->vid) |
5950 + QCA8K_PORT_VLAN_SVID(vlan->vid));
5951 + }
5952 +
5953 + return ret;
5954 +}
5955 +
5956 +static int
5957 +qca8k_port_vlan_del(struct dsa_switch *ds, int port,
5958 + const struct switchdev_obj_port_vlan *vlan)
5959 +{
5960 + struct qca8k_priv *priv = ds->priv;
5961 + int ret;
5962 +
5963 + ret = qca8k_vlan_del(priv, port, vlan->vid);
5964 + if (ret)
5965 + dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
5966 +
5967 + return ret;
5968 +}
5969 +
5970 +static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
5971 +{
5972 + struct qca8k_priv *priv = ds->priv;
5973 +
5974 +	/* Communicate the switch revision to the internal PHY driver.
5975 +	 * Based on the switch revision, different values need to be
5976 +	 * set in the dbg and mmd regs of the PHY.
5977 +	 * The first 2 bits are used to communicate the switch revision
5978 +	 * to the PHY driver.
5979 + */
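+	/* Only ports 1-5 have integrated PHYs; ports 0 and 6 are MAC-only */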
5980 + if (port > 0 && port < 6)
5981 + return priv->switch_revision;
5982 +
5983 + return 0;
5984 +}
5985 +
5986 +static enum dsa_tag_protocol
5987 +qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
5988 + enum dsa_tag_protocol mp)
5989 +{
5990 + return DSA_TAG_PROTO_QCA;
5991 +}
5992 +
5993 +static bool
5994 +qca8k_lag_can_offload(struct dsa_switch *ds,
5995 + struct net_device *lag,
5996 + struct netdev_lag_upper_info *info)
5997 +{
5998 + struct dsa_port *dp;
5999 + int id, members = 0;
6000 +
6001 + id = dsa_lag_id(ds->dst, lag);
6002 + if (id < 0 || id >= ds->num_lag_ids)
6003 + return false;
6004 +
6005 + dsa_lag_foreach_port(dp, ds->dst, lag)
6006 + /* Includes the port joining the LAG */
6007 + members++;
6008 +
6009 + if (members > QCA8K_NUM_PORTS_FOR_LAG)
6010 + return false;
6011 +
6012 + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
6013 + return false;
6014 +
6015 + if (info->hash_type != NETDEV_LAG_HASH_L2 &&
6016 + info->hash_type != NETDEV_LAG_HASH_L23)
6017 + return false;
6018 +
6019 + return true;
6020 +}
6021 +
6022 +static int
6023 +qca8k_lag_setup_hash(struct dsa_switch *ds,
6024 + struct net_device *lag,
6025 + struct netdev_lag_upper_info *info)
6026 +{
6027 + struct qca8k_priv *priv = ds->priv;
6028 + bool unique_lag = true;
6029 + u32 hash = 0;
6030 + int i, id;
6031 +
6032 + id = dsa_lag_id(ds->dst, lag);
6033 +
6034 + switch (info->hash_type) {
6035 + case NETDEV_LAG_HASH_L23:
6036 + hash |= QCA8K_TRUNK_HASH_SIP_EN;
6037 + hash |= QCA8K_TRUNK_HASH_DIP_EN;
6038 + fallthrough;
6039 + case NETDEV_LAG_HASH_L2:
6040 + hash |= QCA8K_TRUNK_HASH_SA_EN;
6041 + hash |= QCA8K_TRUNK_HASH_DA_EN;
6042 + break;
6043 + default: /* We should NEVER reach this */
6044 + return -EOPNOTSUPP;
6045 + }
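+	/* hash now holds the MAC SA/DA enable bits, plus SIP/DIP for L2+L3 */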
6046 +
6047 +	/* Check if we are the only configured LAG */
6048 + dsa_lags_foreach_id(i, ds->dst)
6049 + if (i != id && dsa_lag_dev(ds->dst, i)) {
6050 + unique_lag = false;
6051 + break;
6052 + }
6053 +
6054 +	/* The hash mode is global. Make sure the same hash mode
6055 +	 * is set for all 4 possible LAGs.
6056 +	 * If we are the only configured LAG we can set whatever
6057 +	 * hash mode we want.
6058 +	 * To change the hash mode, all LAGs must be removed
6059 +	 * first; the new mode is applied with the next LAG created.
6060 +	 */
6061 + if (unique_lag) {
6062 + priv->lag_hash_mode = hash;
6063 + } else if (priv->lag_hash_mode != hash) {
6064 +		netdev_err(lag, "Error: Mismatched Hash Mode across different LAGs is not supported\n");
6065 + return -EOPNOTSUPP;
6066 + }
6067 +
6068 + return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
6069 + QCA8K_TRUNK_HASH_MASK, hash);
6070 +}
6071 +
6072 +static int
6073 +qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
6074 + struct net_device *lag, bool delete)
6075 +{
6076 + struct qca8k_priv *priv = ds->priv;
6077 + int ret, id, i;
6078 + u32 val;
6079 +
6080 + id = dsa_lag_id(ds->dst, lag);
6081 +
6082 + /* Read current port member */
6083 + ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
6084 + if (ret)
6085 + return ret;
6086 +
6087 + /* Shift val to the correct trunk */
6088 + val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
6089 + val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
6090 + if (delete)
6091 + val &= ~BIT(port);
6092 + else
6093 + val |= BIT(port);
6094 +
6095 +	/* Update port member mask. With an empty portmap, disable the trunk */
6096 + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
6097 + QCA8K_REG_GOL_TRUNK_MEMBER(id) |
6098 + QCA8K_REG_GOL_TRUNK_EN(id),
6099 + !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
6100 + val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
6101 +
6102 +	/* Search for an empty member if adding, or for the port if deleting */
6103 + for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
6104 + ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
6105 + if (ret)
6106 + return ret;
6107 +
6108 + val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
6109 + val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
6110 +
6111 + if (delete) {
6112 +			/* If the port is flagged to be disabled, assume this
6113 +			 * member is empty
6114 + */
6115 + if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
6116 + continue;
6117 +
6118 + val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
6119 + if (val != port)
6120 + continue;
6121 + } else {
6122 +			/* If the port is flagged to be enabled, assume this
6123 +			 * member is already set
6124 + */
6125 + if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
6126 + continue;
6127 + }
6128 +
6129 + /* We have found the member to add/remove */
6130 + break;
6131 + }
6132 +
6133 + /* Set port in the correct port mask or disable port if in delete mode */
6134 + return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
6135 + QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
6136 + QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
6137 + !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
6138 + port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
6139 +}
6140 +
6141 +static int
6142 +qca8k_port_lag_join(struct dsa_switch *ds, int port,
6143 + struct net_device *lag,
6144 + struct netdev_lag_upper_info *info)
6145 +{
6146 + int ret;
6147 +
6148 + if (!qca8k_lag_can_offload(ds, lag, info))
6149 + return -EOPNOTSUPP;
6150 +
6151 + ret = qca8k_lag_setup_hash(ds, lag, info);
6152 + if (ret)
6153 + return ret;
6154 +
6155 + return qca8k_lag_refresh_portmap(ds, port, lag, false);
6156 +}
6157 +
6158 +static int
6159 +qca8k_port_lag_leave(struct dsa_switch *ds, int port,
6160 + struct net_device *lag)
6161 +{
6162 + return qca8k_lag_refresh_portmap(ds, port, lag, true);
6163 +}
6164 +
6165 +static void
6166 +qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
6167 + bool operational)
6168 +{
6169 + struct dsa_port *dp = master->dsa_ptr;
6170 + struct qca8k_priv *priv = ds->priv;
6171 +
6172 + /* Ethernet MIB/MDIO is only supported for CPU port 0 */
6173 + if (dp->index != 0)
6174 + return;
6175 +
6176 + mutex_lock(&priv->mgmt_eth_data.mutex);
6177 + mutex_lock(&priv->mib_eth_data.mutex);
6178 +
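+	/* Update under both mutexes so the Ethernet mgmt and MIB paths
+	 * never observe a stale mgmt_master while an operation is in flight.
+	 */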
6179 + priv->mgmt_master = operational ? (struct net_device *)master : NULL;
6180 +
6181 + mutex_unlock(&priv->mib_eth_data.mutex);
6182 + mutex_unlock(&priv->mgmt_eth_data.mutex);
6183 +}
6184 +
6185 +static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
6186 + enum dsa_tag_protocol proto)
6187 +{
6188 + struct qca_tagger_data *tagger_data;
6189 +
6190 + switch (proto) {
6191 + case DSA_TAG_PROTO_QCA:
6192 + tagger_data = ds->tagger_data;
6193 +
6194 + tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
6195 + tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
6196 +
6197 + break;
6198 + default:
6199 + return -EOPNOTSUPP;
6200 + }
6201 +
6202 + return 0;
6203 +}
6204 +
6205 +static const struct dsa_switch_ops qca8k_switch_ops = {
6206 + .get_tag_protocol = qca8k_get_tag_protocol,
6207 + .setup = qca8k_setup,
6208 + .get_strings = qca8k_get_strings,
6209 + .get_ethtool_stats = qca8k_get_ethtool_stats,
6210 + .get_sset_count = qca8k_get_sset_count,
6211 + .set_ageing_time = qca8k_set_ageing_time,
6212 + .get_mac_eee = qca8k_get_mac_eee,
6213 + .set_mac_eee = qca8k_set_mac_eee,
6214 + .port_enable = qca8k_port_enable,
6215 + .port_disable = qca8k_port_disable,
6216 + .port_change_mtu = qca8k_port_change_mtu,
6217 + .port_max_mtu = qca8k_port_max_mtu,
6218 + .port_stp_state_set = qca8k_port_stp_state_set,
6219 + .port_bridge_join = qca8k_port_bridge_join,
6220 + .port_bridge_leave = qca8k_port_bridge_leave,
6221 + .port_fast_age = qca8k_port_fast_age,
6222 + .port_fdb_add = qca8k_port_fdb_add,
6223 + .port_fdb_del = qca8k_port_fdb_del,
6224 + .port_fdb_dump = qca8k_port_fdb_dump,
6225 + .port_mdb_add = qca8k_port_mdb_add,
6226 + .port_mdb_del = qca8k_port_mdb_del,
6227 + .port_mirror_add = qca8k_port_mirror_add,
6228 + .port_mirror_del = qca8k_port_mirror_del,
6229 + .port_vlan_filtering = qca8k_port_vlan_filtering,
6230 + .port_vlan_add = qca8k_port_vlan_add,
6231 + .port_vlan_del = qca8k_port_vlan_del,
6232 + .phylink_validate = qca8k_phylink_validate,
6233 + .phylink_mac_link_state = qca8k_phylink_mac_link_state,
6234 + .phylink_mac_config = qca8k_phylink_mac_config,
6235 + .phylink_mac_link_down = qca8k_phylink_mac_link_down,
6236 + .phylink_mac_link_up = qca8k_phylink_mac_link_up,
6237 + .get_phy_flags = qca8k_get_phy_flags,
6238 + .port_lag_join = qca8k_port_lag_join,
6239 + .port_lag_leave = qca8k_port_lag_leave,
6240 + .master_state_change = qca8k_master_change,
6241 + .connect_tag_protocol = qca8k_connect_tag_protocol,
6242 +};
6243 +
6244 +static int qca8k_read_switch_id(struct qca8k_priv *priv)
6245 +{
6246 + u32 val;
6247 + u8 id;
6248 + int ret;
6249 +
6250 + if (!priv->info)
6251 + return -ENODEV;
6252 +
6253 + ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
6254 + if (ret < 0)
6255 + return -ENODEV;
6256 +
6257 + id = QCA8K_MASK_CTRL_DEVICE_ID(val);
6258 + if (id != priv->info->id) {
6259 + dev_err(priv->dev,
6260 + "Switch id detected %x but expected %x",
6261 + id, priv->info->id);
6262 + return -ENODEV;
6263 + }
6264 +
6265 + priv->switch_id = id;
6266 +
6267 + /* Save revision to communicate to the internal PHY driver */
6268 + priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
6269 +
6270 + return 0;
6271 +}
6272 +
6273 +static int
6274 +qca8k_sw_probe(struct mdio_device *mdiodev)
6275 +{
6276 + struct qca8k_priv *priv;
6277 + int ret;
6278 +
6279 +	/* allocate the private data struct so that we can probe the switch's
6280 +	 * ID register
6281 + */
6282 + priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
6283 + if (!priv)
6284 + return -ENOMEM;
6285 +
6286 +	priv->bus = mdiodev->bus;
6287 +	priv->dev = &mdiodev->dev;
6288 +	priv->info = of_device_get_match_data(priv->dev);
6289 +
6290 + priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
6291 + GPIOD_ASIS);
6292 + if (IS_ERR(priv->reset_gpio))
6293 + return PTR_ERR(priv->reset_gpio);
6294 +
6295 + if (priv->reset_gpio) {
6296 + gpiod_set_value_cansleep(priv->reset_gpio, 1);
6297 + /* The active low duration must be greater than 10 ms
6298 + * and checkpatch.pl wants 20 ms.
6299 + */
6300 + msleep(20);
6301 + gpiod_set_value_cansleep(priv->reset_gpio, 0);
6302 + }
6303 +
6304 + /* Start by setting up the register mapping */
6305 + priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
6306 + &qca8k_regmap_config);
6307 + if (IS_ERR(priv->regmap)) {
6308 + dev_err(priv->dev, "regmap initialization failed");
6309 + return PTR_ERR(priv->regmap);
6310 + }
6311 +
6312 + priv->mdio_cache.page = 0xffff;
6313 + priv->mdio_cache.lo = 0xffff;
6314 + priv->mdio_cache.hi = 0xffff;
6315 +
6316 + /* Check the detected switch id */
6317 + ret = qca8k_read_switch_id(priv);
6318 + if (ret)
6319 + return ret;
6320 +
6321 + priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
6322 + if (!priv->ds)
6323 + return -ENOMEM;
6324 +
6325 + mutex_init(&priv->mgmt_eth_data.mutex);
6326 + init_completion(&priv->mgmt_eth_data.rw_done);
6327 +
6328 + mutex_init(&priv->mib_eth_data.mutex);
6329 + init_completion(&priv->mib_eth_data.rw_done);
6330 +
6331 + priv->ds->dev = &mdiodev->dev;
6332 + priv->ds->num_ports = QCA8K_NUM_PORTS;
6333 + priv->ds->priv = priv;
6334 + priv->ds->ops = &qca8k_switch_ops;
6335 + mutex_init(&priv->reg_mutex);
6336 + dev_set_drvdata(&mdiodev->dev, priv);
6337 +
6338 + return dsa_register_switch(priv->ds);
6339 +}
6340 +
6341 +static void
6342 +qca8k_sw_remove(struct mdio_device *mdiodev)
6343 +{
6344 + struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
6345 + int i;
6346 +
6347 + if (!priv)
6348 + return;
6349 +
6350 + for (i = 0; i < QCA8K_NUM_PORTS; i++)
6351 + qca8k_port_set_status(priv, i, 0);
6352 +
6353 + dsa_unregister_switch(priv->ds);
6354 +
6355 + dev_set_drvdata(&mdiodev->dev, NULL);
6356 +}
6357 +
6358 +static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
6359 +{
6360 + struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
6361 +
6362 + if (!priv)
6363 + return;
6364 +
6365 + dsa_switch_shutdown(priv->ds);
6366 +
6367 + dev_set_drvdata(&mdiodev->dev, NULL);
6368 +}
6369 +
6370 +#ifdef CONFIG_PM_SLEEP
6371 +static void
6372 +qca8k_set_pm(struct qca8k_priv *priv, int enable)
6373 +{
6374 + int port;
6375 +
6376 + for (port = 0; port < QCA8K_NUM_PORTS; port++) {
6377 + /* Do not enable on resume if the port was
6378 + * disabled before.
6379 + */
6380 + if (!(priv->port_enabled_map & BIT(port)))
6381 + continue;
6382 +
6383 + qca8k_port_set_status(priv, port, enable);
6384 + }
6385 +}
6386 +
6387 +static int qca8k_suspend(struct device *dev)
6388 +{
6389 + struct qca8k_priv *priv = dev_get_drvdata(dev);
6390 +
6391 + qca8k_set_pm(priv, 0);
6392 +
6393 + return dsa_switch_suspend(priv->ds);
6394 +}
6395 +
6396 +static int qca8k_resume(struct device *dev)
6397 +{
6398 + struct qca8k_priv *priv = dev_get_drvdata(dev);
6399 +
6400 + qca8k_set_pm(priv, 1);
6401 +
6402 + return dsa_switch_resume(priv->ds);
6403 +}
6404 +#endif /* CONFIG_PM_SLEEP */
6405 +
6406 +static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
6407 + qca8k_suspend, qca8k_resume);
6408 +
6409 +static const struct qca8k_info_ops qca8xxx_ops = {
6410 + .autocast_mib = qca8k_get_ethtool_stats_eth,
6411 +};
6412 +
6413 +static const struct qca8k_match_data qca8327 = {
6414 + .id = QCA8K_ID_QCA8327,
6415 + .reduced_package = true,
6416 + .mib_count = QCA8K_QCA832X_MIB_COUNT,
6417 + .ops = &qca8xxx_ops,
6418 +};
6419 +
6420 +static const struct qca8k_match_data qca8328 = {
6421 + .id = QCA8K_ID_QCA8327,
6422 + .mib_count = QCA8K_QCA832X_MIB_COUNT,
6423 + .ops = &qca8xxx_ops,
6424 +};
6425 +
6426 +static const struct qca8k_match_data qca833x = {
6427 + .id = QCA8K_ID_QCA8337,
6428 + .mib_count = QCA8K_QCA833X_MIB_COUNT,
6429 + .ops = &qca8xxx_ops,
6430 +};
6431 +
6432 +static const struct of_device_id qca8k_of_match[] = {
6433 + { .compatible = "qca,qca8327", .data = &qca8327 },
6434 + { .compatible = "qca,qca8328", .data = &qca8328 },
6435 + { .compatible = "qca,qca8334", .data = &qca833x },
6436 + { .compatible = "qca,qca8337", .data = &qca833x },
6437 + { /* sentinel */ },
6438 +};
6439 +
6440 +static struct mdio_driver qca8kmdio_driver = {
6441 + .probe = qca8k_sw_probe,
6442 + .remove = qca8k_sw_remove,
6443 + .shutdown = qca8k_sw_shutdown,
6444 + .mdiodrv.driver = {
6445 + .name = "qca8k",
6446 + .of_match_table = qca8k_of_match,
6447 + .pm = &qca8k_pm_ops,
6448 + },
6449 +};
6450 +
6451 +mdio_module_driver(qca8kmdio_driver);
6452 +
6453 +MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
6454 +MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
6455 +MODULE_LICENSE("GPL v2");
6456 +MODULE_ALIAS("platform:qca8k");
6457 --- /dev/null
6458 +++ b/drivers/net/dsa/qca/qca8k-common.c
6459 @@ -0,0 +1,63 @@
6460 +// SPDX-License-Identifier: GPL-2.0
6461 +/*
6462 + * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
6463 + * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
6464 + * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6465 + * Copyright (c) 2016 John Crispin <john@phrozen.org>
6466 + */
6467 +
6468 +#include <linux/netdevice.h>
6469 +#include <net/dsa.h>
6470 +
6471 +#include "qca8k.h"
6472 +
6473 +#define MIB_DESC(_s, _o, _n) \
6474 + { \
6475 + .size = (_s), \
6476 + .offset = (_o), \
6477 + .name = (_n), \
6478 + }
6479 +
6480 +const struct qca8k_mib_desc ar8327_mib[] = {
6481 + MIB_DESC(1, 0x00, "RxBroad"),
6482 + MIB_DESC(1, 0x04, "RxPause"),
6483 + MIB_DESC(1, 0x08, "RxMulti"),
6484 + MIB_DESC(1, 0x0c, "RxFcsErr"),
6485 + MIB_DESC(1, 0x10, "RxAlignErr"),
6486 + MIB_DESC(1, 0x14, "RxRunt"),
6487 + MIB_DESC(1, 0x18, "RxFragment"),
6488 + MIB_DESC(1, 0x1c, "Rx64Byte"),
6489 + MIB_DESC(1, 0x20, "Rx128Byte"),
6490 + MIB_DESC(1, 0x24, "Rx256Byte"),
6491 + MIB_DESC(1, 0x28, "Rx512Byte"),
6492 + MIB_DESC(1, 0x2c, "Rx1024Byte"),
6493 + MIB_DESC(1, 0x30, "Rx1518Byte"),
6494 + MIB_DESC(1, 0x34, "RxMaxByte"),
6495 + MIB_DESC(1, 0x38, "RxTooLong"),
6496 + MIB_DESC(2, 0x3c, "RxGoodByte"),
6497 + MIB_DESC(2, 0x44, "RxBadByte"),
6498 + MIB_DESC(1, 0x4c, "RxOverFlow"),
6499 + MIB_DESC(1, 0x50, "Filtered"),
6500 + MIB_DESC(1, 0x54, "TxBroad"),
6501 + MIB_DESC(1, 0x58, "TxPause"),
6502 + MIB_DESC(1, 0x5c, "TxMulti"),
6503 + MIB_DESC(1, 0x60, "TxUnderRun"),
6504 + MIB_DESC(1, 0x64, "Tx64Byte"),
6505 + MIB_DESC(1, 0x68, "Tx128Byte"),
6506 + MIB_DESC(1, 0x6c, "Tx256Byte"),
6507 + MIB_DESC(1, 0x70, "Tx512Byte"),
6508 + MIB_DESC(1, 0x74, "Tx1024Byte"),
6509 + MIB_DESC(1, 0x78, "Tx1518Byte"),
6510 + MIB_DESC(1, 0x7c, "TxMaxByte"),
6511 + MIB_DESC(1, 0x80, "TxOverSize"),
6512 + MIB_DESC(2, 0x84, "TxByte"),
6513 + MIB_DESC(1, 0x8c, "TxCollision"),
6514 + MIB_DESC(1, 0x90, "TxAbortCol"),
6515 + MIB_DESC(1, 0x94, "TxMultiCol"),
6516 + MIB_DESC(1, 0x98, "TxSingleCol"),
6517 + MIB_DESC(1, 0x9c, "TxExcDefer"),
6518 + MIB_DESC(1, 0xa0, "TxDefer"),
6519 + MIB_DESC(1, 0xa4, "TxLateCol"),
6520 + MIB_DESC(1, 0xa8, "RXUnicast"),
6521 + MIB_DESC(1, 0xac, "TXUnicast"),
6522 +};
6523 --- a/drivers/net/dsa/qca/qca8k.h
6524 +++ b/drivers/net/dsa/qca/qca8k.h
6525 @@ -414,4 +414,7 @@ struct qca8k_fdb {
6526 u8 mac[6];
6527 };
6528
6529 +/* Common setup function */
6530 +extern const struct qca8k_mib_desc ar8327_mib[];
6531 +
6532 #endif /* __QCA8K_H */