mediatek: update patches
[openwrt/openwrt.git] / target / linux / mediatek / patches-4.4 / 0096-net-next-mediatek-add-support-for-IRQ-grouping.patch
1 From 190df1a9dbf4d8809b7f991194ce60e47f2290a2 Mon Sep 17 00:00:00 2001
2 From: John Crispin <john@phrozen.org>
3 Date: Wed, 23 Mar 2016 18:31:48 +0100
4 Subject: [PATCH 096/102] net-next: mediatek: add support for IRQ grouping
5
6 The ethernet core has 3 IRQs. Using the IRQ grouping registers we are able
7 to separate TX and RX IRQs, which allows us to service them on separate
8 cores. This patch splits the IRQ handler into 2 separate functions, one for
9 TX and another for RX. The TX housekeeping is split out into its own NAPI
10 handler.
11
12 Signed-off-by: John Crispin <john@phrozen.org>
13 ---
14 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 156 +++++++++++++++++----------
15 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 15 ++-
16 2 files changed, 111 insertions(+), 60 deletions(-)
17
18 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
19 index c869064..718cbb2 100644
20 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
21 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
22 @@ -905,14 +905,13 @@ release_desc:
23 return done;
24 }
25
26 -static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
27 +static int mtk_poll_tx(struct mtk_eth *eth, int budget)
28 {
29 struct mtk_tx_ring *ring = &eth->tx_ring;
30 struct mtk_tx_dma *desc;
31 struct sk_buff *skb;
32 struct mtk_tx_buf *tx_buf;
33 - int total = 0, done = 0;
34 - unsigned int bytes = 0;
35 + unsigned int bytes = 0, done = 0;
36 u32 cpu, dma;
37 static int condition;
38 int i;
39 @@ -964,63 +963,82 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
40 netdev_completed_queue(eth->netdev[i], done, bytes);
41 }
42
43 - /* read hw index again make sure no new tx packet */
44 - if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
45 - *tx_again = true;
46 - else
47 - mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
48 -
49 - if (!total)
50 - return 0;
51 -
52 if (mtk_queue_stopped(eth) &&
53 (atomic_read(&ring->free_count) > ring->thresh))
54 mtk_wake_queue(eth);
55
56 - return total;
57 + return done;
58 }
59
60 -static int mtk_poll(struct napi_struct *napi, int budget)
61 +static void mtk_handle_status_irq(struct mtk_eth *eth)
62 {
63 - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
64 - u32 status, status2, mask;
65 - int tx_done, rx_done;
66 - bool tx_again = false;
67 -
68 - status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
69 - status2 = mtk_r32(eth, MTK_INT_STATUS2);
70 - tx_done = 0;
71 - rx_done = 0;
72 - tx_again = 0;
73 -
74 - if (status & MTK_TX_DONE_INT)
75 - tx_done = mtk_poll_tx(eth, budget, &tx_again);
76 -
77 - if (status & MTK_RX_DONE_INT)
78 - rx_done = mtk_poll_rx(napi, budget, eth);
79 + u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
80
81 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
82 mtk_stats_update(eth);
83 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
84 MTK_INT_STATUS2);
85 }
86 +}
87 +
88 +static int mtk_napi_tx(struct napi_struct *napi, int budget)
89 +{
90 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
91 + u32 status, mask;
92 + int tx_done = 0;
93 +
94 + mtk_handle_status_irq(eth);
95 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
96 + tx_done = mtk_poll_tx(eth, budget);
97 +
98 + if (unlikely(netif_msg_intr(eth))) {
99 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
100 + mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
101 + dev_info(eth->dev,
102 + "done tx %d, intr 0x%08x/0x%x\n",
103 + tx_done, status, mask);
104 + }
105 +
106 + if (tx_done == budget)
107 + return budget;
108 +
109 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
110 + if (status & MTK_TX_DONE_INT)
111 + return budget;
112 +
113 + napi_complete(napi);
114 + mtk_irq_enable(eth, MTK_TX_DONE_INT);
115 +
116 + return tx_done;
117 +}
118 +
119 +static int mtk_napi_rx(struct napi_struct *napi, int budget)
120 +{
121 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
122 + u32 status, mask;
123 + int rx_done = 0;
124 +
125 + mtk_handle_status_irq(eth);
126 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
127 + rx_done = mtk_poll_rx(napi, budget, eth);
128
129 if (unlikely(netif_msg_intr(eth))) {
130 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
131 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
132 - netdev_info(eth->netdev[0],
133 - "done tx %d, rx %d, intr 0x%08x/0x%x\n",
134 - tx_done, rx_done, status, mask);
135 + dev_info(eth->dev,
136 + "done rx %d, intr 0x%08x/0x%x\n",
137 + rx_done, status, mask);
138 }
139
140 - if (tx_again || rx_done == budget)
141 + if (rx_done == budget)
142 return budget;
143
144 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
145 - if (status & (tx_intr | rx_intr))
146 + if (status & MTK_RX_DONE_INT)
147 return budget;
148
149 napi_complete(napi);
150 - mtk_irq_enable(eth, MTK_RX_DONE_INT | MTK_RX_DONE_INT);
151 + mtk_irq_enable(eth, MTK_RX_DONE_INT);
152
153 return rx_done;
154 }
155 @@ -1256,22 +1274,26 @@ static void mtk_tx_timeout(struct net_device *dev)
156 schedule_work(&eth->pending_work);
157 }
158
159 -static irqreturn_t mtk_handle_irq(int irq, void *_eth)
160 +static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
161 {
162 struct mtk_eth *eth = _eth;
163 - u32 status;
164
165 - status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
166 - if (unlikely(!status))
167 - return IRQ_NONE;
168 + if (likely(napi_schedule_prep(&eth->rx_napi))) {
169 + __napi_schedule(&eth->rx_napi);
170 + mtk_irq_disable(eth, MTK_RX_DONE_INT);
171 + }
172
173 - if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
174 - if (likely(napi_schedule_prep(&eth->rx_napi)))
175 - __napi_schedule(&eth->rx_napi);
176 - } else {
177 - mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
178 + return IRQ_HANDLED;
179 +}
180 +
181 +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
182 +{
183 + struct mtk_eth *eth = _eth;
184 +
185 + if (likely(napi_schedule_prep(&eth->tx_napi))) {
186 + __napi_schedule(&eth->tx_napi);
187 + mtk_irq_disable(eth, MTK_TX_DONE_INT);
188 }
189 - mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
190
191 return IRQ_HANDLED;
192 }
193 @@ -1284,7 +1306,7 @@ static void mtk_poll_controller(struct net_device *dev)
194 u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
195
196 mtk_irq_disable(eth, int_mask);
197 - mtk_handle_irq(dev->irq, dev);
198 + mtk_handle_irq_rx(eth->irq[2], dev);
199 mtk_irq_enable(eth, int_mask);
200 }
201 #endif
202 @@ -1320,6 +1342,7 @@ static int mtk_open(struct net_device *dev)
203 if (err)
204 return err;
205
206 + napi_enable(&eth->tx_napi);
207 napi_enable(&eth->rx_napi);
208 mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
209 }
210 @@ -1368,6 +1391,7 @@ static int mtk_stop(struct net_device *dev)
211 return 0;
212
213 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
214 + napi_disable(&eth->tx_napi);
215 napi_disable(&eth->rx_napi);
216
217 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
218 @@ -1405,7 +1429,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
219 /* Enable RX VLan Offloading */
220 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
221
222 - err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
223 + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
224 + dev_name(eth->dev), eth);
225 + if (err)
226 + return err;
227 + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
228 dev_name(eth->dev), eth);
229 if (err)
230 return err;
231 @@ -1421,7 +1449,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
232 mtk_w32(eth, 0, MTK_RST_GL);
233
234 /* FE int grouping */
235 - mtk_w32(eth, 0, MTK_FE_INT_GRP);
236 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
237 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
238 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
239 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
240 + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
241
242 for (i = 0; i < 2; i++) {
243 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
244 @@ -1469,7 +1501,9 @@ static void mtk_uninit(struct net_device *dev)
245 phy_disconnect(mac->phy_dev);
246 mtk_mdio_cleanup(eth);
247 mtk_irq_disable(eth, ~0);
248 - free_irq(dev->irq, dev);
249 + free_irq(eth->irq[0], dev);
250 + free_irq(eth->irq[1], dev);
251 + free_irq(eth->irq[2], dev);
252 }
253
254 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
255 @@ -1744,10 +1778,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
256 dev_err(eth->dev, "error bringing up device\n");
257 goto free_netdev;
258 }
259 - eth->netdev[id]->irq = eth->irq;
260 + eth->netdev[id]->irq = eth->irq[0];
261 netif_info(eth, probe, eth->netdev[id],
262 "mediatek frame engine at 0x%08lx, irq %d\n",
263 - eth->netdev[id]->base_addr, eth->netdev[id]->irq);
264 + eth->netdev[id]->base_addr, eth->irq[0]);
265
266 return 0;
267
268 @@ -1764,6 +1798,7 @@ static int mtk_probe(struct platform_device *pdev)
269 struct mtk_soc_data *soc;
270 struct mtk_eth *eth;
271 int err;
272 + int i;
273
274 match = of_match_device(of_mtk_match, &pdev->dev);
275 soc = (struct mtk_soc_data *)match->data;
276 @@ -1799,10 +1834,12 @@ static int mtk_probe(struct platform_device *pdev)
277 return PTR_ERR(eth->rstc);
278 }
279
280 - eth->irq = platform_get_irq(pdev, 0);
281 - if (eth->irq < 0) {
282 - dev_err(&pdev->dev, "no IRQ resource found\n");
283 - return -ENXIO;
284 + for (i = 0; i < 3; i++) {
285 + eth->irq[i] = platform_get_irq(pdev, i);
286 + if (eth->irq[i] < 0) {
287 + dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
288 + return -ENXIO;
289 + }
290 }
291
292 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
293 @@ -1843,7 +1880,9 @@ static int mtk_probe(struct platform_device *pdev)
294 * for NAPI to work
295 */
296 init_dummy_netdev(&eth->dummy_dev);
297 - netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
298 + netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
299 + MTK_NAPI_WEIGHT);
300 + netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
301 MTK_NAPI_WEIGHT);
302
303 platform_set_drvdata(pdev, eth);
304 @@ -1864,6 +1903,7 @@ static int mtk_remove(struct platform_device *pdev)
305 clk_disable_unprepare(eth->clk_gp1);
306 clk_disable_unprepare(eth->clk_gp2);
307
308 + netif_napi_del(&eth->tx_napi);
309 netif_napi_del(&eth->rx_napi);
310 mtk_cleanup(eth);
311 platform_set_drvdata(pdev, NULL);
312 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
313 index 3159d2a..f82e3ac 100644
314 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
315 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
316 @@ -68,6 +68,10 @@
317 /* Unicast Filter MAC Address Register - High */
318 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
319
320 +/* PDMA Interrupt grouping registers */
321 +#define MTK_PDMA_INT_GRP1 0xa50
322 +#define MTK_PDMA_INT_GRP2 0xa54
323 +
324 /* QDMA TX Queue Configuration Registers */
325 #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
326 #define QDMA_RES_THRES 4
327 @@ -125,6 +129,11 @@
328 #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
329 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
330
331 +/* QDMA Interrupt grouping registers */
332 +#define MTK_QDMA_INT_GRP1 0x1a20
333 +#define MTK_QDMA_INT_GRP2 0x1a24
334 +#define MTK_RLS_DONE_INT BIT(0)
335 +
336 /* QDMA Interrupt Status Register */
337 #define MTK_QDMA_INT_MASK 0x1A1C
338
339 @@ -356,7 +365,8 @@ struct mtk_rx_ring {
340 * @dma_refcnt: track how many netdevs are using the DMA engine
341 * @tx_ring: Pointer to the memore holding info about the TX ring
342 * @rx_ring: Pointer to the memore holding info about the RX ring
343 - * @rx_napi: The NAPI struct
344 + * @tx_napi: The TX NAPI struct
345 + * @rx_napi: The RX NAPI struct
346 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
347 * @phy_scratch_ring: physical address of scratch_ring
348 * @scratch_head: The scratch memory that scratch_ring points to.
349 @@ -377,7 +387,7 @@ struct mtk_eth {
350 struct net_device dummy_dev;
351 struct net_device *netdev[MTK_MAX_DEVS];
352 struct mtk_mac *mac[MTK_MAX_DEVS];
353 - int irq;
354 + int irq[3];
355 u32 msg_enable;
356 unsigned long sysclk;
357 struct regmap *ethsys;
358 @@ -385,6 +395,7 @@ struct mtk_eth {
359 atomic_t dma_refcnt;
360 struct mtk_tx_ring tx_ring;
361 struct mtk_rx_ring rx_ring;
362 + struct napi_struct tx_napi;
363 struct napi_struct rx_napi;
364 struct mtk_tx_dma *scratch_ring;
365 dma_addr_t phy_scratch_ring;
366 --
367 1.7.10.4
368