openwrt/staging/stintel.git: target/linux/generic/backport-5.15/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch
1 From 5886d26fd25bbe26130e3e5f7474b9b3e98a3469 Mon Sep 17 00:00:00 2001
2 From: Lorenzo Bianconi <lorenzo@kernel.org>
3 Date: Fri, 22 Jul 2022 09:19:39 +0200
4 Subject: [PATCH] net: ethernet: mtk_eth_soc: add xmit XDP support
5
6 Introduce XDP support for XDP_TX verdict and ndo_xdp_xmit function
7 pointer.
8
9 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
10 Signed-off-by: David S. Miller <davem@davemloft.net>
11 ---
12 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 192 +++++++++++++++++---
13 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 +-
14 2 files changed, 180 insertions(+), 22 deletions(-)
15
16 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
17 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
18 @@ -988,15 +988,26 @@ static void mtk_tx_unmap(struct mtk_eth
19 }
20 }
21
22 - tx_buf->flags = 0;
23 - if (tx_buf->skb &&
24 - (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
25 - if (napi)
26 - napi_consume_skb(tx_buf->skb, napi);
27 + if (tx_buf->type == MTK_TYPE_SKB) {
28 + if (tx_buf->data &&
29 + tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
30 + struct sk_buff *skb = tx_buf->data;
31 +
32 + if (napi)
33 + napi_consume_skb(skb, napi);
34 + else
35 + dev_kfree_skb_any(skb);
36 + }
37 + } else if (tx_buf->data) {
38 + struct xdp_frame *xdpf = tx_buf->data;
39 +
40 + if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
41 + xdp_return_frame_rx_napi(xdpf);
42 else
43 - dev_kfree_skb_any(tx_buf->skb);
44 + xdp_return_frame(xdpf);
45 }
46 - tx_buf->skb = NULL;
47 + tx_buf->flags = 0;
48 + tx_buf->data = NULL;
49 }
50
51 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
52 @@ -1013,7 +1024,7 @@ static void setup_tx_buf(struct mtk_eth
53 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
54 dma_unmap_len_set(tx_buf, dma_len1, size);
55 } else {
56 - tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
57 + tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
58 txd->txd1 = mapped_addr;
59 txd->txd2 = TX_DMA_PLEN0(size);
60 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
61 @@ -1189,7 +1200,7 @@ static int mtk_tx_map(struct sk_buff *sk
62 soc->txrx.txd_size);
63 if (new_desc)
64 memset(tx_buf, 0, sizeof(*tx_buf));
65 - tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
66 + tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
67 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
68 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
69 MTK_TX_FLAGS_FPORT1;
70 @@ -1203,7 +1214,8 @@ static int mtk_tx_map(struct sk_buff *sk
71 }
72
73 /* store skb to cleanup */
74 - itx_buf->skb = skb;
75 + itx_buf->type = MTK_TYPE_SKB;
76 + itx_buf->data = skb;
77
78 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
79 if (k & 0x1)
80 @@ -1415,13 +1427,14 @@ static struct page_pool *mtk_create_page
81 .pool_size = size,
82 .nid = NUMA_NO_NODE,
83 .dev = eth->dma_dev,
84 - .dma_dir = DMA_FROM_DEVICE,
85 .offset = MTK_PP_HEADROOM,
86 .max_len = MTK_PP_MAX_BUF_SIZE,
87 };
88 struct page_pool *pp;
89 int err;
90
91 + pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
92 + : DMA_FROM_DEVICE;
93 pp = page_pool_create(&pp_params);
94 if (IS_ERR(pp))
95 return pp;
96 @@ -1467,6 +1480,122 @@ static void mtk_rx_put_buff(struct mtk_r
97 skb_free_frag(data);
98 }
99
100 +static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
101 + struct net_device *dev, bool dma_map)
102 +{
103 + const struct mtk_soc_data *soc = eth->soc;
104 + struct mtk_tx_ring *ring = &eth->tx_ring;
105 + struct mtk_tx_dma_desc_info txd_info = {
106 + .size = xdpf->len,
107 + .first = true,
108 + .last = true,
109 + };
110 + struct mtk_mac *mac = netdev_priv(dev);
111 + struct mtk_tx_dma *txd, *txd_pdma;
112 + int err = 0, index = 0, n_desc = 1;
113 + struct mtk_tx_buf *tx_buf;
114 +
115 + if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
116 + return -EBUSY;
117 +
118 + if (unlikely(atomic_read(&ring->free_count) <= 1))
119 + return -EBUSY;
120 +
121 + spin_lock(&eth->page_lock);
122 +
123 + txd = ring->next_free;
124 + if (txd == ring->last_free) {
125 + err = -ENOMEM;
126 + goto out;
127 + }
128 +
129 + tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
130 + memset(tx_buf, 0, sizeof(*tx_buf));
131 +
132 + if (dma_map) { /* ndo_xdp_xmit */
133 + txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
134 + txd_info.size, DMA_TO_DEVICE);
135 + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
136 + err = -ENOMEM;
137 + goto out;
138 + }
139 + tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
140 + } else {
141 + struct page *page = virt_to_head_page(xdpf->data);
142 +
143 + txd_info.addr = page_pool_get_dma_addr(page) +
144 + sizeof(*xdpf) + xdpf->headroom;
145 + dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
146 + txd_info.size,
147 + DMA_BIDIRECTIONAL);
148 + }
149 + mtk_tx_set_dma_desc(dev, txd, &txd_info);
150 +
151 + tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
152 +
153 + txd_pdma = qdma_to_pdma(ring, txd);
154 + setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
155 + index++);
156 +
157 + /* store xdpf for cleanup */
158 + tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
159 + tx_buf->data = xdpf;
160 +
161 + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
162 + if (index & 1)
163 + txd_pdma->txd2 |= TX_DMA_LS0;
164 + else
165 + txd_pdma->txd2 |= TX_DMA_LS1;
166 + }
167 +
168 + ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
169 + atomic_sub(n_desc, &ring->free_count);
170 +
171 + /* make sure that all changes to the dma ring are flushed before we
172 + * continue
173 + */
174 + wmb();
175 +
176 + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
177 + mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
178 + } else {
179 + int idx;
180 +
181 + idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
182 + mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
183 + MT7628_TX_CTX_IDX0);
184 + }
185 +out:
186 + spin_unlock(&eth->page_lock);
187 +
188 + return err;
189 +}
190 +
191 +static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
192 + struct xdp_frame **frames, u32 flags)
193 +{
194 + struct mtk_mac *mac = netdev_priv(dev);
195 + struct mtk_hw_stats *hw_stats = mac->hw_stats;
196 + struct mtk_eth *eth = mac->hw;
197 + int i, nxmit = 0;
198 +
199 + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
200 + return -EINVAL;
201 +
202 + for (i = 0; i < num_frame; i++) {
203 + if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
204 + break;
205 + nxmit++;
206 + }
207 +
208 + u64_stats_update_begin(&hw_stats->syncp);
209 + hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
210 + hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
211 + u64_stats_update_end(&hw_stats->syncp);
212 +
213 + return nxmit;
214 +}
215 +
216 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
217 struct xdp_buff *xdp, struct net_device *dev)
218 {
219 @@ -1495,6 +1624,18 @@ static u32 mtk_xdp_run(struct mtk_eth *e
220
221 count = &hw_stats->xdp_stats.rx_xdp_redirect;
222 goto update_stats;
223 + case XDP_TX: {
224 + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
225 +
226 + if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
227 + count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
228 + act = XDP_DROP;
229 + break;
230 + }
231 +
232 + count = &hw_stats->xdp_stats.rx_xdp_tx;
233 + goto update_stats;
234 + }
235 default:
236 bpf_warn_invalid_xdp_action(act);
237 fallthrough;
238 @@ -1728,9 +1869,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
239 {
240 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
241 struct mtk_tx_ring *ring = &eth->tx_ring;
242 - struct mtk_tx_dma *desc;
243 - struct sk_buff *skb;
244 struct mtk_tx_buf *tx_buf;
245 + struct mtk_tx_dma *desc;
246 u32 cpu, dma;
247
248 cpu = ring->last_free_ptr;
249 @@ -1751,15 +1891,21 @@ static int mtk_poll_tx_qdma(struct mtk_e
250 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
251 mac = 1;
252
253 - skb = tx_buf->skb;
254 - if (!skb)
255 + if (!tx_buf->data)
256 break;
257
258 - if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
259 + if (tx_buf->type == MTK_TYPE_SKB &&
260 + tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
261 + struct sk_buff *skb = tx_buf->data;
262 +
263 bytes[mac] += skb->len;
264 done[mac]++;
265 budget--;
266 + } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
267 + tx_buf->type == MTK_TYPE_XDP_NDO) {
268 + budget--;
269 }
270 +
271 mtk_tx_unmap(eth, tx_buf, true);
272
273 ring->last_free = desc;
274 @@ -1778,9 +1924,8 @@ static int mtk_poll_tx_pdma(struct mtk_e
275 unsigned int *done, unsigned int *bytes)
276 {
277 struct mtk_tx_ring *ring = &eth->tx_ring;
278 - struct mtk_tx_dma *desc;
279 - struct sk_buff *skb;
280 struct mtk_tx_buf *tx_buf;
281 + struct mtk_tx_dma *desc;
282 u32 cpu, dma;
283
284 cpu = ring->cpu_idx;
285 @@ -1788,14 +1933,18 @@ static int mtk_poll_tx_pdma(struct mtk_e
286
287 while ((cpu != dma) && budget) {
288 tx_buf = &ring->buf[cpu];
289 - skb = tx_buf->skb;
290 - if (!skb)
291 + if (!tx_buf->data)
292 break;
293
294 - if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
295 + if (tx_buf->type == MTK_TYPE_SKB &&
296 + tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
297 + struct sk_buff *skb = tx_buf->data;
298 bytes[0] += skb->len;
299 done[0]++;
300 budget--;
301 + } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
302 + tx_buf->type == MTK_TYPE_XDP_NDO) {
303 + budget--;
304 }
305
306 mtk_tx_unmap(eth, tx_buf, true);
307 @@ -3463,6 +3612,7 @@ static const struct net_device_ops mtk_n
308 #endif
309 .ndo_setup_tc = mtk_eth_setup_tc,
310 .ndo_bpf = mtk_xdp,
311 + .ndo_xdp_xmit = mtk_xdp_xmit,
312 };
313
314 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
315 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
316 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
317 @@ -694,6 +694,12 @@ enum mtk_dev_state {
318 MTK_RESETTING
319 };
320
321 +enum mtk_tx_buf_type {
322 + MTK_TYPE_SKB,
323 + MTK_TYPE_XDP_TX,
324 + MTK_TYPE_XDP_NDO,
325 +};
326 +
327 /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
328 * by the TX descriptor s
329 * @skb: The SKB pointer of the packet being sent
330 @@ -703,7 +709,9 @@ enum mtk_dev_state {
331 * @dma_len1: The length of the second segment
332 */
333 struct mtk_tx_buf {
334 - struct sk_buff *skb;
335 + enum mtk_tx_buf_type type;
336 + void *data;
337 +
338 u32 flags;
339 DEFINE_DMA_UNMAP_ADDR(dma_addr0);
340 DEFINE_DMA_UNMAP_LEN(dma_len0);