From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 2 Nov 2023 16:47:07 +0100
Subject: [PATCH net-next 1/2] net: ethernet: mediatek: split tx and rx fields
 in mtk_soc_data struct

Split tx and rx fields in mtk_soc_data struct. This is a preliminary
patch to roll back to QDMA for MT7986 SoC in order to fix a hw hang
if the device receives a corrupted packet.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  29 +--
 2 files changed, 139 insertions(+), 100 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1264,7 +1264,7 @@ static int mtk_init_fq_dma(struct mtk_et
 eth->scratch_ring = eth->sram_base;
 else
 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->txrx.txd_size,
+ cnt * soc->tx.desc_size,
 &eth->phy_scratch_ring,
 GFP_KERNEL);
 if (unlikely(!eth->scratch_ring))
@@ -1280,16 +1280,16 @@ static int mtk_init_fq_dma(struct mtk_et
 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 return -ENOMEM;

- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);

 for (i = 0; i < cnt; i++) {
 struct mtk_tx_dma_v2 *txd;

- txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd = eth->scratch_ring + i * soc->tx.desc_size;
 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
 if (i < cnt - 1)
 txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->txrx.txd_size;
+ (i + 1) * soc->tx.desc_size;

 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
 txd->txd4 = 0;
@@ -1538,7 +1538,7 @@ static int mtk_tx_map(struct sk_buff *sk
 if (itxd == ring->last_free)
 return -ENOMEM;

- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
 memset(itx_buf, 0, sizeof(*itx_buf));

 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1579,7 +1579,7 @@ static int mtk_tx_map(struct sk_buff *sk

 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
 txd_info.size = min_t(unsigned int, frag_size,
- soc->txrx.dma_max_len);
+ soc->tx.dma_max_len);
 txd_info.qid = queue;
 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
 !(frag_size - txd_info.size);
@@ -1592,7 +1592,7 @@ static int mtk_tx_map(struct sk_buff *sk
 mtk_tx_set_dma_desc(dev, txd, &txd_info);

 tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
 if (new_desc)
 memset(tx_buf, 0, sizeof(*tx_buf));
 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1635,7 +1635,7 @@ static int mtk_tx_map(struct sk_buff *sk
 } else {
 int next_idx;

- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
 ring->dma_size);
 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
 }
@@ -1644,7 +1644,7 @@ static int mtk_tx_map(struct sk_buff *sk

 err_dma:
 do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);

 /* unmap dma */
 mtk_tx_unmap(eth, tx_buf, NULL, false);
@@ -1669,7 +1669,7 @@ static int mtk_cal_txd_req(struct mtk_et
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 frag = &skb_shinfo(skb)->frags[i];
 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- eth->soc->txrx.dma_max_len);
+ eth->soc->tx.dma_max_len);
 }
 } else {
 nfrags += skb_shinfo(skb)->nr_frags;
@@ -1810,7 +1810,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri

 ring = &eth->rx_ring[i];
 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
 if (rxd->rxd2 & RX_DMA_DONE) {
 ring->calc_idx_update = true;
 return ring;
@@ -1978,7 +1978,7 @@ static int mtk_xdp_submit_frame(struct m
 }
 htxd = txd;

- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
 memset(tx_buf, 0, sizeof(*tx_buf));
 htx_buf = tx_buf;

@@ -1997,7 +1997,7 @@ static int mtk_xdp_submit_frame(struct m
 goto unmap;

 tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
 memset(tx_buf, 0, sizeof(*tx_buf));
 n_desc++;
 }
@@ -2035,7 +2035,7 @@ static int mtk_xdp_submit_frame(struct m
 } else {
 int idx;

- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
 MT7628_TX_CTX_IDX0);
 }
@@ -2046,7 +2046,7 @@ static int mtk_xdp_submit_frame(struct m

 unmap:
 while (htxd != txd) {
- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
 mtk_tx_unmap(eth, tx_buf, NULL, false);

 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@ -2177,7 +2177,7 @@ static int mtk_poll_rx(struct napi_struc
 goto rx_done;

 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
 data = ring->data[idx];

 if (!mtk_rx_get_desc(eth, &trxd, rxd))
@@ -2312,7 +2312,7 @@ static int mtk_poll_rx(struct napi_struc
 rxdcsum = &trxd.rxd4;
 }

- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
 skb->ip_summed = CHECKSUM_UNNECESSARY;
 else
 skb_checksum_none_assert(skb);
@@ -2436,7 +2436,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
 break;

 tx_buf = mtk_desc_to_tx_buf(ring, desc,
- eth->soc->txrx.txd_size);
+ eth->soc->tx.desc_size);
 if (!tx_buf->data)
 break;

@@ -2487,7 +2487,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
 }
 mtk_tx_unmap(eth, tx_buf, &bq, true);

- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+ desc = ring->dma + cpu * eth->soc->tx.desc_size;
 ring->last_free = desc;
 atomic_inc(&ring->free_count);

@@ -2577,7 +2577,7 @@ static int mtk_napi_rx(struct napi_struc
 do {
 int rx_done;

- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ mtk_w32(eth, eth->soc->rx.irq_done_mask,
 reg_map->pdma.irq_status);
 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
 rx_done_total += rx_done;
@@ -2593,10 +2593,10 @@ static int mtk_napi_rx(struct napi_struc
 return budget;

 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask);
+ eth->soc->rx.irq_done_mask);

 if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);

 return rx_done_total;
 }
@@ -2605,7 +2605,7 @@ static int mtk_tx_alloc(struct mtk_eth *
 {
 const struct mtk_soc_data *soc = eth->soc;
 struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = soc->txrx.txd_size;
+ int i, sz = soc->tx.desc_size;
 struct mtk_tx_dma_v2 *txd;
 int ring_size;
 u32 ofs, val;
@@ -2728,14 +2728,14 @@ static void mtk_tx_clean(struct mtk_eth
 }
 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
 dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
 ring->dma, ring->phys);
 ring->dma = NULL;
 }

 if (ring->dma_pdma) {
 dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
 ring->dma_pdma, ring->phys_pdma);
 ring->dma_pdma = NULL;
 }
@@ -2790,15 +2790,15 @@ static int mtk_rx_alloc(struct mtk_eth *
 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
 rx_flag != MTK_RX_FLAGS_NORMAL) {
 ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->txrx.rxd_size,
- &ring->phys, GFP_KERNEL);
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys, GFP_KERNEL);
 } else {
 struct mtk_tx_ring *tx_ring = &eth->tx_ring;

 ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
 ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
 }

 if (!ring->dma)
@@ -2809,7 +2809,7 @@ static int mtk_rx_alloc(struct mtk_eth *
 dma_addr_t dma_addr;
 void *data;

- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
 if (ring->page_pool) {
 data = mtk_page_pool_get_buff(ring->page_pool,
 &dma_addr, GFP_KERNEL);
@@ -2900,7 +2900,7 @@ static void mtk_rx_clean(struct mtk_eth
 if (!ring->data[i])
 continue;

- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
 if (!rxd->rxd1)
 continue;

@@ -2917,7 +2917,7 @@ static void mtk_rx_clean(struct mtk_eth

 if (!in_sram && ring->dma) {
 dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma_size * eth->soc->rx.desc_size,
 ring->dma, ring->phys);
 ring->dma = NULL;
 }
@@ -3280,7 +3280,7 @@ static void mtk_dma_free(struct mtk_eth
 netdev_reset_queue(eth->netdev[i]);
 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
 dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
 eth->scratch_ring, eth->phy_scratch_ring);
 eth->scratch_ring = NULL;
 eth->phy_scratch_ring = 0;
@@ -3330,7 +3330,7 @@ static irqreturn_t mtk_handle_irq_rx(int

 eth->rx_events++;
 if (likely(napi_schedule_prep(&eth->rx_napi))) {
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
 __napi_schedule(&eth->rx_napi);
 }

@@ -3356,9 +3356,9 @@ static irqreturn_t mtk_handle_irq(int ir
 const struct mtk_reg_map *reg_map = eth->soc->reg_map;

 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
- eth->soc->txrx.rx_irq_done_mask) {
+ eth->soc->rx.irq_done_mask) {
 if (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask)
+ eth->soc->rx.irq_done_mask)
 mtk_handle_irq_rx(irq, _eth);
 }
 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -3376,10 +3376,10 @@ static void mtk_poll_controller(struct n
 struct mtk_eth *eth = mac->hw;

 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
 mtk_handle_irq_rx(eth->irq[2], dev);
 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
 }
 #endif

@@ -3545,7 +3545,7 @@ static int mtk_open(struct net_device *d
 napi_enable(&eth->tx_napi);
 napi_enable(&eth->rx_napi);
 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
 refcount_set(&eth->dma_refcnt, 1);
 }
 else
@@ -3628,7 +3628,7 @@ static int mtk_stop(struct net_device *d
 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);

 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
 napi_disable(&eth->tx_napi);
 napi_disable(&eth->rx_napi);

@@ -4107,9 +4107,9 @@ static int mtk_hw_init(struct mtk_eth *e

 /* FE int grouping */
 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

 if (mtk_is_netsys_v3_or_greater(eth)) {
@@ -5270,11 +5270,15 @@ static const struct mtk_soc_data mt2701_
 .required_clks = MT7623_CLKS_BITMAP,
 .required_pctl = true,
 .version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = 16,
 },
@@ -5290,11 +5294,15 @@ static const struct mtk_soc_data mt7621_
 .offload_version = 1,
 .hash_offset = 2,
 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = 16,
 },
@@ -5312,11 +5320,15 @@ static const struct mtk_soc_data mt7622_
 .hash_offset = 2,
 .has_accounting = true,
 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = 16,
 },
@@ -5333,11 +5345,15 @@ static const struct mtk_soc_data mt7623_
 .hash_offset = 2,
 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
 .disable_pll_modes = true,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = 16,
 },
@@ -5352,11 +5368,15 @@ static const struct mtk_soc_data mt7629_
 .required_pctl = false,
 .has_accounting = true,
 .version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = 16,
 },
@@ -5374,11 +5394,15 @@ static const struct mtk_soc_data mt7981_
 .hash_offset = 4,
 .has_accounting = true,
 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 .dma_len_offset = 8,
 },
@@ -5396,11 +5420,15 @@ static const struct mtk_soc_data mt7986_
 .hash_offset = 4,
 .has_accounting = true,
 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 .dma_len_offset = 8,
 },
@@ -5418,11 +5446,15 @@ static const struct mtk_soc_data mt7988_
 .hash_offset = 4,
 .has_accounting = true,
 .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 .dma_len_offset = 8,
 },
@@ -5435,11 +5467,15 @@ static const struct mtk_soc_data rt5350_
 .required_clks = MT7628_CLKS_BITMAP,
 .required_pctl = false,
 .version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
 .dma_max_len = MTK_TX_DMA_BUF_LEN,
 .dma_len_offset = 16,
 },
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -327,8 +327,8 @@
 /* QDMA descriptor txd3 */
 #define TX_DMA_OWNER_CPU BIT(31)
 #define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
 #define TX_DMA_SWC BIT(14)
 #define TX_DMA_PQID GENMASK(3, 0)
 #define TX_DMA_ADDR64_MASK GENMASK(3, 0)
@@ -348,8 +348,8 @@
 /* QDMA descriptor rxd2 */
 #define RX_DMA_DONE BIT(31)
 #define RX_DMA_LSO BIT(30)
-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
 #define RX_DMA_VTAG BIT(15)
 #define RX_DMA_ADDR64_MASK GENMASK(3, 0)
 #if IS_ENABLED(CONFIG_64BIT)
@@ -1209,10 +1209,9 @@ struct mtk_reg_map {
 * @foe_entry_size Foe table entry size.
 * @has_accounting Bool indicating support for accounting of
 * offloaded flows.
- * @txd_size Tx DMA descriptor size.
- * @rxd_size Rx DMA descriptor size.
- * @rx_irq_done_mask Rx irq done register mask.
- * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @desc_size Tx/Rx DMA descriptor size.
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
 * @dma_max_len Max DMA tx/rx buffer length.
 * @dma_len_offset Tx/Rx DMA length field offset.
 */
@@ -1230,13 +1229,17 @@ struct mtk_soc_data {
 bool has_accounting;
 bool disable_pll_modes;
 struct {
- u32 txd_size;
- u32 rxd_size;
- u32 rx_irq_done_mask;
- u32 rx_dma_l4_valid;
+ u32 desc_size;
 u32 dma_max_len;
 u32 dma_len_offset;
- } txrx;
+ } tx;
+ struct {
+ u32 desc_size;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ } rx;
 };

 #define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)