openwrt/openwrt.git: target/linux/generic/backport-5.15/752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Mon, 18 Sep 2023 12:29:16 +0200
Subject: [PATCH] net: ethernet: mtk_wed: introduce hw_rro support for MT7988

The MT7988 SoC supports 802.11 receive reordering offload in hardware,
while the MT7986 SoC implements it in the firmware running on the MCU.

Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -26,7 +26,7 @@
 #define MTK_WED_BUF_SIZE                2048
 #define MTK_WED_PAGE_BUF_SIZE           128
 #define MTK_WED_BUF_PER_PAGE            (PAGE_SIZE / 2048)
-#define MTK_WED_RX_PAGE_BUF_PER_PAGE    (PAGE_SIZE / 128)
+#define MTK_WED_RX_BUF_PER_PAGE         (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
 #define MTK_WED_RX_RING_SIZE            1536
 #define MTK_WED_RX_PG_BM_CNT            8192
 #define MTK_WED_AMSDU_BUF_SIZE          (PAGE_SIZE << 4)
@@ -596,6 +596,68 @@ free_pagelist:
 }

 static int
+mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
+{
+        int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
+        struct mtk_wed_buf *page_list;
+        struct mtk_wed_bm_desc *desc;
+        dma_addr_t desc_phys;
+        int i, page_idx = 0;
+
+        if (!dev->wlan.hw_rro)
+                return 0;
+
+        page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+        if (!page_list)
+                return -ENOMEM;
+
+        dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+        dev->hw_rro.pages = page_list;
+        desc = dma_alloc_coherent(dev->hw->dev,
+                                  dev->wlan.rx_nbuf * sizeof(*desc),
+                                  &desc_phys, GFP_KERNEL);
+        if (!desc)
+                return -ENOMEM;
+
+        dev->hw_rro.desc = desc;
+        dev->hw_rro.desc_phys = desc_phys;
+
+        for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+                dma_addr_t page_phys, buf_phys;
+                struct page *page;
+                int s;
+
+                page = __dev_alloc_page(GFP_KERNEL);
+                if (!page)
+                        return -ENOMEM;
+
+                page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+                                         DMA_BIDIRECTIONAL);
+                if (dma_mapping_error(dev->hw->dev, page_phys)) {
+                        __free_page(page);
+                        return -ENOMEM;
+                }
+
+                page_list[page_idx].p = page;
+                page_list[page_idx++].phy_addr = page_phys;
+                dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+                                        DMA_BIDIRECTIONAL);
+
+                buf_phys = page_phys;
+                for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
+                        desc->buf0 = cpu_to_le32(buf_phys);
+                        buf_phys += MTK_WED_PAGE_BUF_SIZE;
+                        desc++;
+                }
+
+                dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+                                           DMA_BIDIRECTIONAL);
+        }
+
+        return 0;
+}
+
+static int
 mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
 {
         struct mtk_wed_bm_desc *desc;
@@ -612,7 +674,42 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_d
         dev->rx_buf_ring.desc_phys = desc_phys;
         dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);

-        return 0;
+        return mtk_wed_hwrro_buffer_alloc(dev);
+}
+
+static void
+mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
+{
+        struct mtk_wed_buf *page_list = dev->hw_rro.pages;
+        struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
+        int i, page_idx = 0;
+
+        if (!dev->wlan.hw_rro)
+                return;
+
+        if (!page_list)
+                return;
+
+        if (!desc)
+                goto free_pagelist;
+
+        for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+                dma_addr_t buf_addr = page_list[page_idx].phy_addr;
+                void *page = page_list[page_idx++].p;
+
+                if (!page)
+                        break;
+
+                dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+                               DMA_BIDIRECTIONAL);
+                __free_page(page);
+        }
+
+        dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
+                          desc, dev->hw_rro.desc_phys);
+
+free_pagelist:
+        kfree(page_list);
 }

 static void
@@ -626,6 +723,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_de
         dev->wlan.release_rx_buf(dev);
         dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
                           desc, dev->rx_buf_ring.desc_phys);
+
+        mtk_wed_hwrro_free_buffer(dev);
+}
+
+static void
+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
+{
+        if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+                return;
+
+        wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
+                FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
+
+        wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);
+
+        wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
+                MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
+                FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
+                           MTK_WED_RX_PG_BM_CNT));
+
+        /* enable rx_page_bm to fetch dmad */
+        wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
 }

 static void
@@ -639,6 +758,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed
         wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
                 FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
         wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+
+        mtk_wed_hwrro_init(dev);
 }

 static void
@@ -934,6 +1055,8 @@ mtk_wed_bus_init(struct mtk_wed_device *
 static void
 mtk_wed_set_wpdma(struct mtk_wed_device *dev)
 {
+        int i;
+
         if (mtk_wed_is_v1(dev->hw)) {
                 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
                 return;
@@ -951,6 +1074,15 @@ mtk_wed_set_wpdma(struct mtk_wed_device

         wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
         wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+
+        if (!dev->wlan.hw_rro)
+                return;
+
+        wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
+        wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
+        for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
+                wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
+                        dev->wlan.wpdma_rx_pg + i * 0x10);
 }

 static void
@@ -1762,6 +1894,165 @@ mtk_wed_dma_enable(struct mtk_wed_device
 }

 static void
+mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
+{
+        int i;
+
+        wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+        wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+
+        if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+                return;
+
+        wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+        wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+                MTK_WED_RRO_MSDU_PG_DRV_CLR);
+
+        wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
+                MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
+                MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
+                MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
+                MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
+                FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
+                           dev->wlan.rro_rx_tbit[0]) |
+                FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
+                           dev->wlan.rro_rx_tbit[1]));
+
+        wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
+                MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
+                MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
+                MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
+                MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
+                MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
+                MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
+                FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
+                           dev->wlan.rx_pg_tbit[0]) |
+                FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
+                           dev->wlan.rx_pg_tbit[1]) |
+                FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
+                           dev->wlan.rx_pg_tbit[2]));
+
+        /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
+         * WM FWDL completes, otherwise the RRO_MSDU_PG ring may be broken
+         */
+        wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+                MTK_WED_RRO_MSDU_PG_DRV_EN);
+
+        for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
+                struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];
+
+                if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+                        continue;
+
+                if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+                        dev_err(dev->hw->dev,
+                                "rx_rro_ring(%d) initialization failed\n", i);
+        }
+
+        for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+                struct mtk_wed_ring *ring = &dev->rx_page_ring[i];
+
+                if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+                        continue;
+
+                if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+                        dev_err(dev->hw->dev,
+                                "rx_page_ring(%d) initialization failed\n", i);
+        }
+}
+
+static void
+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
+                          void __iomem *regs)
+{
+        struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
+
+        ring->wpdma = regs;
+        wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
+                readl(regs));
+        wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
+                readl(regs + MTK_WED_RING_OFS_COUNT));
+        ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static void
+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+        struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
+
+        ring->wpdma = regs;
+        wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
+                readl(regs));
+        wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
+                readl(regs + MTK_WED_RING_OFS_COUNT));
+        ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static int
+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+        struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
+        u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
+        int i, count = 0;
+
+        ring->wpdma = regs;
+        wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
+                readl(regs) & 0xfffffff0);
+
+        wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
+                readl(regs + MTK_WED_RING_OFS_COUNT));
+
+        /* ack sn cr */
+        wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
+                dev->wlan.ind_cmd.ack_sn_addr);
+        wed_w32(dev, MTK_WED_RRO_CFG1,
+                FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
+                           dev->wlan.ind_cmd.win_size) |
+                FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
+                           dev->wlan.ind_cmd.particular_sid));
+
+        /* particular session addr element */
+        wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
+                dev->wlan.ind_cmd.particular_se_phys);
+
+        for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
+                wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
+                        dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
+                wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
+                        MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
+
+                val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+                while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
+                        val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+                if (count >= 100)
+                        dev_err(dev->hw->dev,
+                                "write ba session base failed\n");
+        }
+
+        /* pn check init */
+        for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
+                wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
+                        MTK_WED_PN_CHECK_IS_FIRST);
+
+                wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
+                        FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
+
+                count = 0;
+                val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+                while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
+                        val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+                if (count >= 100)
+                        dev_err(dev->hw->dev,
+                                "session(%d) initialization failed\n", i);
+        }
+
+        wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
+        wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
+
+        return 0;
+}
+
+static void
 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
 {
         int i;
@@ -2215,6 +2506,10 @@ void mtk_wed_add_hw(struct device_node *
                 .detach = mtk_wed_detach,
                 .ppe_check = mtk_wed_ppe_check,
                 .setup_tc = mtk_wed_setup_tc,
+                .start_hw_rro = mtk_wed_start_hw_rro,
+                .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
+                .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
+                .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
         };
         struct device_node *eth_np = eth->dev->of_node;
         struct platform_device *pdev;
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -10,6 +10,7 @@

 #define MTK_WED_TX_QUEUES               2
 #define MTK_WED_RX_QUEUES               2
+#define MTK_WED_RX_PAGE_QUEUES          3

 #define WED_WO_STA_REC                  0x6

@@ -99,6 +100,9 @@ struct mtk_wed_device {
         struct mtk_wed_ring txfree_ring;
         struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
         struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
+        struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
+        struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
+        struct mtk_wed_ring ind_cmd_ring;

         struct {
                 int size;
@@ -120,6 +124,13 @@ struct mtk_wed_device {
                 dma_addr_t fdbk_phys;
         } rro;

+        struct {
+                int size;
+                struct mtk_wed_buf *pages;
+                struct mtk_wed_bm_desc *desc;
+                dma_addr_t desc_phys;
+        } hw_rro;
+
         /* filled by driver: */
         struct {
                 union {
@@ -138,6 +149,8 @@ struct mtk_wed_device {
                 u32 wpdma_txfree;
                 u32 wpdma_rx_glo;
                 u32 wpdma_rx;
+                u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
+                u32 wpdma_rx_pg;

                 bool wcid_512;
                 bool hw_rro;
@@ -152,9 +165,20 @@ struct mtk_wed_device {

                 u8 tx_tbit[MTK_WED_TX_QUEUES];
                 u8 rx_tbit[MTK_WED_RX_QUEUES];
+                u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
+                u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
                 u8 txfree_tbit;
                 u8 amsdu_max_subframes;

+                struct {
+                        u8 se_group_nums;
+                        u16 win_size;
+                        u16 particular_sid;
+                        u32 ack_sn_addr;
+                        dma_addr_t particular_se_phys;
+                        dma_addr_t addr_elem_phys[1024];
+                } ind_cmd;
+
                 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
                 int (*offload_enable)(struct mtk_wed_device *wed);
                 void (*offload_disable)(struct mtk_wed_device *wed);
@@ -193,6 +217,14 @@ struct mtk_wed_ops {
         void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
         int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
                         enum tc_setup_type type, void *type_data);
+        void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
+                             bool reset);
+        void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+                                  void __iomem *regs);
+        void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+                                      void __iomem *regs);
+        int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
+                                 void __iomem *regs);
 };

 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
@@ -264,6 +296,15 @@ static inline bool mtk_wed_is_amsdu_supp
 #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
 #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
         (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
+        (_dev)->ops->start_hw_rro(_dev, _mask, _reset)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
+        (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
+        (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
+        (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
+
 #else
 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
 {
@@ -283,6 +324,10 @@ static inline bool mtk_wed_device_active
 #define mtk_wed_device_stop(_dev) do {} while (0)
 #define mtk_wed_device_dma_reset(_dev) do {} while (0)
 #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
 #endif

 #endif