From 5f6f3600a334398e27802de33a6a8726aacbe88c Mon Sep 17 00:00:00 2001
From: Weijie Gao <weijie.gao@mediatek.com>
Date: Wed, 31 Aug 2022 19:04:23 +0800
Subject: [PATCH 07/32] net: mediatek: stop using bitfields for DMA descriptors

This patch is a preparation for adding a new version of PDMA whose DMA
descriptor fields have changed. Using bitfields would result in a complex
modification; converting the bitfields to u32 units solves this problem
easily.

Reviewed-by: Simon Glass <sjg@chromium.org>
Signed-off-by: Weijie Gao <weijie.gao@mediatek.com>
---
 drivers/net/mtk_eth.c | 144 ++++++++++++++----------------------------
 drivers/net/mtk_eth.h |  32 ++++
 2 files changed, 80 insertions(+), 96 deletions(-)

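[Editorial note, placed in the region git-am ignores: a minimal sketch, not
part of the upstream commit, of the accessor style this patch switches to.
It composes and decodes one RX descriptor word with the same BIT()/GENMASK()/
FIELD_PREP()/FIELD_GET() macros the patch adds to mtk_eth.h; the
mtk_rxd2_arm()/mtk_rxd2_len() helper names are illustrative only. Because the
field layout lives solely in the mask macros, a future PDMA revision only
needs a new set of masks instead of a new bitfield struct.]

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define PDMA_RXD2_DDONE		BIT(31)
#define PDMA_RXD2_PLEN0_M	GENMASK(29, 16)
#define PDMA_RXD2_PLEN0_GET(_v)	FIELD_GET(PDMA_RXD2_PLEN0_M, (_v))
#define PDMA_RXD2_PLEN0_SET(_v)	FIELD_PREP(PDMA_RXD2_PLEN0_M, (_v))

/* Arm an RX descriptor word for a buffer of buf_len bytes. */
static inline u32 mtk_rxd2_arm(u32 buf_len)
{
	return PDMA_RXD2_PLEN0_SET(buf_len);
}

/* Return the received length once the hardware has set DDONE. */
static inline int mtk_rxd2_len(u32 rxd2)
{
	if (!(rxd2 & PDMA_RXD2_DDONE))
		return -EAGAIN;	/* descriptor not yet completed by hardware */

	return PDMA_RXD2_PLEN0_GET(rxd2);
}
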
--- a/drivers/net/mtk_eth.c
+++ b/drivers/net/mtk_eth.c
@@ -65,77 +65,6 @@
 	 (DP_DISCARD << MC_DP_S) | \
 	 (DP_DISCARD << UN_DP_S))
 
-struct pdma_rxd_info1 {
-	u32 PDP0;
-};
-
-struct pdma_rxd_info2 {
-	u32 PLEN1 : 14;
-	u32 LS1 : 1;
-	u32 UN_USED : 1;
-	u32 PLEN0 : 14;
-	u32 LS0 : 1;
-	u32 DDONE : 1;
-};
-
-struct pdma_rxd_info3 {
-	u32 PDP1;
-};
-
-struct pdma_rxd_info4 {
-	u32 FOE_ENTRY : 14;
-	u32 CRSN : 5;
-	u32 SP : 3;
-	u32 L4F : 1;
-	u32 L4VLD : 1;
-	u32 TACK : 1;
-	u32 IP4F : 1;
-	u32 IP4 : 1;
-	u32 IP6 : 1;
-	u32 UN_USED : 4;
-};
-
-struct pdma_rxdesc {
-	struct pdma_rxd_info1 rxd_info1;
-	struct pdma_rxd_info2 rxd_info2;
-	struct pdma_rxd_info3 rxd_info3;
-	struct pdma_rxd_info4 rxd_info4;
-};
-
-struct pdma_txd_info1 {
-	u32 SDP0;
-};
-
-struct pdma_txd_info2 {
-	u32 SDL1 : 14;
-	u32 LS1 : 1;
-	u32 BURST : 1;
-	u32 SDL0 : 14;
-	u32 LS0 : 1;
-	u32 DDONE : 1;
-};
-
-struct pdma_txd_info3 {
-	u32 SDP1;
-};
-
-struct pdma_txd_info4 {
-	u32 VLAN_TAG : 16;
-	u32 INS : 1;
-	u32 RESV : 2;
-	u32 UDF : 6;
-	u32 FPORT : 3;
-	u32 TSO : 1;
-	u32 TUI_CO : 3;
-};
-
-struct pdma_txdesc {
-	struct pdma_txd_info1 txd_info1;
-	struct pdma_txd_info2 txd_info2;
-	struct pdma_txd_info3 txd_info3;
-	struct pdma_txd_info4 txd_info4;
-};
-
 enum mtk_switch {
 	SW_NONE,
 	SW_MT7530,
@@ -151,13 +80,15 @@ enum mtk_switch {
 struct mtk_soc_data {
 	u32 caps;
 	u32 ana_rgc3;
+	u32 txd_size;
+	u32 rxd_size;
 };
 
 struct mtk_eth_priv {
 	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);
 
-	struct pdma_txdesc *tx_ring_noc;
-	struct pdma_rxdesc *rx_ring_noc;
+	void *tx_ring_noc;
+	void *rx_ring_noc;
 
 	int rx_dma_owner_idx0;
 	int tx_cpu_owner_idx0;
@@ -1202,14 +1133,16 @@ static void mtk_mac_init(struct mtk_eth_
 static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
 {
 	char *pkt_base = priv->pkt_pool;
+	struct mtk_tx_dma *txd;
+	struct mtk_rx_dma *rxd;
 	int i;
 
 	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
 	udelay(500);
 
-	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
-	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
-	memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);
+	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
+	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
+	memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);
 
 	flush_dcache_range((ulong)pkt_base,
 			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));
@@ -1218,17 +1151,21 @@ static void mtk_eth_fifo_init(struct mtk
 	priv->tx_cpu_owner_idx0 = 0;
 
 	for (i = 0; i < NUM_TX_DESC; i++) {
-		priv->tx_ring_noc[i].txd_info2.LS0 = 1;
-		priv->tx_ring_noc[i].txd_info2.DDONE = 1;
-		priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;
+		txd = priv->tx_ring_noc + i * priv->soc->txd_size;
+
+		txd->txd1 = virt_to_phys(pkt_base);
+		txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;
+		txd->txd4 = PDMA_TXD4_FPORT_SET(priv->gmac_id + 1);
 
-		priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
 		pkt_base += PKTSIZE_ALIGN;
 	}
 
 	for (i = 0; i < NUM_RX_DESC; i++) {
-		priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
-		priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
+		rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;
+
+		rxd->rxd1 = virt_to_phys(pkt_base);
+		rxd->rxd2 = PDMA_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
+
 		pkt_base += PKTSIZE_ALIGN;
 	}
 
@@ -1315,20 +1252,22 @@ static int mtk_eth_send(struct udevice *
 {
 	struct mtk_eth_priv *priv = dev_get_priv(dev);
 	u32 idx = priv->tx_cpu_owner_idx0;
+	struct mtk_tx_dma *txd;
 	void *pkt_base;
 
-	if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
+	txd = priv->tx_ring_noc + idx * priv->soc->txd_size;
+
+	if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
 		debug("mtk-eth: TX DMA descriptor ring is full\n");
 		return -EPERM;
 	}
 
-	pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
+	pkt_base = (void *)phys_to_virt(txd->txd1);
 	memcpy(pkt_base, packet, length);
 	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
 			   roundup(length, ARCH_DMA_MINALIGN));
 
-	priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
-	priv->tx_ring_noc[idx].txd_info2.DDONE = 0;
+	txd->txd2 = PDMA_TXD2_LS0 | PDMA_TXD2_SDL0_SET(length);
 
 	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
 	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
@@ -1340,16 +1279,20 @@ static int mtk_eth_recv(struct udevice *
 {
 	struct mtk_eth_priv *priv = dev_get_priv(dev);
 	u32 idx = priv->rx_dma_owner_idx0;
+	struct mtk_rx_dma *rxd;
 	uchar *pkt_base;
 	u32 length;
 
-	if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
+	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
+
+	if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
 		debug("mtk-eth: RX DMA descriptor ring is empty\n");
 		return -EAGAIN;
 	}
 
-	length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
-	pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
+	length = PDMA_RXD2_PLEN0_GET(rxd->rxd2);
+
+	pkt_base = (void *)phys_to_virt(rxd->rxd1);
 	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
 				roundup(length, ARCH_DMA_MINALIGN));
 
@@ -1363,10 +1306,11 @@ static int mtk_eth_free_pkt(struct udevi
 {
 	struct mtk_eth_priv *priv = dev_get_priv(dev);
 	u32 idx = priv->rx_dma_owner_idx0;
+	struct mtk_rx_dma *rxd;
+
+	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
 
-	priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
-	priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
-	priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
+	rxd->rxd2 = PDMA_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
 
 	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
 	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
@@ -1393,11 +1337,11 @@ static int mtk_eth_probe(struct udevice
 		return ret;
 
 	/* Prepare for tx/rx rings */
-	priv->tx_ring_noc = (struct pdma_txdesc *)
-		noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
+	priv->tx_ring_noc = (void *)
+		noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
 				ARCH_DMA_MINALIGN);
-	priv->rx_ring_noc = (struct pdma_rxdesc *)
-		noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
+	priv->rx_ring_noc = (void *)
+		noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
 				ARCH_DMA_MINALIGN);
 
 	/* Set MAC mode */
@@ -1554,18 +1498,26 @@ static int mtk_eth_of_to_plat(struct ude
 
 static const struct mtk_soc_data mt7629_data = {
 	.ana_rgc3 = 0x128,
+	.txd_size = sizeof(struct mtk_tx_dma),
+	.rxd_size = sizeof(struct mtk_rx_dma),
 };
 
 static const struct mtk_soc_data mt7623_data = {
 	.caps = MT7623_CAPS,
+	.txd_size = sizeof(struct mtk_tx_dma),
+	.rxd_size = sizeof(struct mtk_rx_dma),
 };
 
 static const struct mtk_soc_data mt7622_data = {
 	.ana_rgc3 = 0x2028,
+	.txd_size = sizeof(struct mtk_tx_dma),
+	.rxd_size = sizeof(struct mtk_rx_dma),
 };
 
 static const struct mtk_soc_data mt7621_data = {
 	.caps = MT7621_CAPS,
+	.txd_size = sizeof(struct mtk_tx_dma),
+	.rxd_size = sizeof(struct mtk_rx_dma),
 };
 
 static const struct udevice_id mtk_eth_ids[] = {
--- a/drivers/net/mtk_eth.h
+++ b/drivers/net/mtk_eth.h
@@ -10,6 +10,7 @@
 #define _MTK_ETH_H_
 
 #include <linux/bitops.h>
+#include <linux/bitfield.h>
 
 enum mkt_eth_capabilities {
 	MTK_TRGMII_BIT,
@@ -435,4 +436,35 @@ enum mkt_eth_capabilities {
 #define PHY_POWER_SAVING_M		0x300
 #define PHY_POWER_SAVING_TX		0x0
 
+/* PDMA descriptors */
+struct mtk_rx_dma {
+	unsigned int rxd1;
+	unsigned int rxd2;
+	unsigned int rxd3;
+	unsigned int rxd4;
+} __packed __aligned(4);
+
+struct mtk_tx_dma {
+	unsigned int txd1;
+	unsigned int txd2;
+	unsigned int txd3;
+	unsigned int txd4;
+} __packed __aligned(4);
+
+/* PDMA TXD fields */
+#define PDMA_TXD2_DDONE		BIT(31)
+#define PDMA_TXD2_LS0		BIT(30)
+#define PDMA_TXD2_SDL0_M	GENMASK(29, 16)
+#define PDMA_TXD2_SDL0_SET(_v)	FIELD_PREP(PDMA_TXD2_SDL0_M, (_v))
+
+#define PDMA_TXD4_FPORT_M	GENMASK(27, 25)
+#define PDMA_TXD4_FPORT_SET(_v)	FIELD_PREP(PDMA_TXD4_FPORT_M, (_v))
+
+/* PDMA RXD fields */
+#define PDMA_RXD2_DDONE		BIT(31)
+#define PDMA_RXD2_LS0		BIT(30)
+#define PDMA_RXD2_PLEN0_M	GENMASK(29, 16)
+#define PDMA_RXD2_PLEN0_GET(_v)	FIELD_GET(PDMA_RXD2_PLEN0_M, (_v))
+#define PDMA_RXD2_PLEN0_SET(_v)	FIELD_PREP(PDMA_RXD2_PLEN0_M, (_v))
+
 #endif /* _MTK_ETH_H_ */
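
[Editorial note, appended after the diff: a hedged sketch of how a descriptor
ring is walked once the element size comes from mtk_soc_data instead of
sizeof(struct pdma_txdesc). The mtk_txd_at() helper below is hypothetical;
the patch open-codes the same byte-offset arithmetic in mtk_eth_fifo_init(),
mtk_eth_send() and mtk_eth_recv().]

#include <linux/types.h>

struct mtk_tx_dma;	/* the four-word descriptor added to mtk_eth.h above */

/*
 * Hypothetical helper: locate TX descriptor i by byte offset, so the same
 * code keeps working for whatever txd_size a SoC entry declares.
 */
static inline struct mtk_tx_dma *mtk_txd_at(void *ring, u32 txd_size, u32 i)
{
	return (struct mtk_tx_dma *)((u8 *)ring + i * txd_size);
}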