kernel: update kernel 4.4 to version 4.4.9
[openwrt/staging/noltari.git] / target / linux / mediatek / patches-4.4 / 0088-net-next-mediatek-add-support-for-IRQ-grouping.patch
1 From 41b4500871ab5b1ef27c6fb49ffd8aac8c7e5009 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Wed, 23 Mar 2016 18:31:48 +0100
4 Subject: [PATCH 88/91] net-next: mediatek: add support for IRQ grouping
5
6 The ethernet core has 3 IRQs. Using the IRQ grouping registers we are able
7 to separate TX and RX IRQs, which allows us to service them on separate
8 cores. This patch splits the irq handler into 2 separate functions, one for
9 TX and another for RX. The TX housekeeping is split out of the NAPI handler.
10 Instead we use a tasklet to handle housekeeping.
11
12 Signed-off-by: John Crispin <blogic@openwrt.org>
13 ---
14 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 164 ++++++++++++++++++---------
15 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 16 ++-
16 2 files changed, 124 insertions(+), 56 deletions(-)
17
18 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
19 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
20 @@ -790,7 +790,7 @@ drop:
21 }
22
23 static int mtk_poll_rx(struct napi_struct *napi, int budget,
24 - struct mtk_eth *eth, u32 rx_intr)
25 + struct mtk_eth *eth)
26 {
27 struct mtk_rx_ring *ring = &eth->rx_ring;
28 int idx = ring->calc_idx;
29 @@ -878,19 +878,18 @@ release_desc:
30 }
31
32 if (done < budget)
33 - mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
34 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
35
36 return done;
37 }
38
39 -static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
40 +static int mtk_poll_tx(struct mtk_eth *eth, int budget)
41 {
42 struct mtk_tx_ring *ring = &eth->tx_ring;
43 struct mtk_tx_dma *desc;
44 struct sk_buff *skb;
45 struct mtk_tx_buf *tx_buf;
46 - int total = 0, done = 0;
47 - unsigned int bytes = 0;
48 + unsigned int bytes = 0, done = 0;
49 u32 cpu, dma;
50 static int condition;
51 int i;
52 @@ -944,63 +943,80 @@ static int mtk_poll_tx(struct mtk_eth *e
53 }
54
55 /* read hw index again make sure no new tx packet */
56 - if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
57 - *tx_again = true;
58 - else
59 + if (cpu == dma && cpu == mtk_r32(eth, MTK_QTX_DRX_PTR))
60 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
61
62 - if (!total)
63 - return 0;
64 -
65 if (atomic_read(&ring->free_count) > ring->thresh)
66 mtk_wake_queue(eth);
67
68 - return total;
69 + return done;
70 }
71
72 -static int mtk_poll(struct napi_struct *napi, int budget)
73 +static void mtk_handle_status_irq(struct mtk_eth *eth)
74 {
75 - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
76 - u32 status, status2, mask, tx_intr, rx_intr, status_intr;
77 - int tx_done, rx_done;
78 - bool tx_again = false;
79 -
80 - status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
81 - status2 = mtk_r32(eth, MTK_INT_STATUS2);
82 - tx_intr = MTK_TX_DONE_INT;
83 - rx_intr = MTK_RX_DONE_INT;
84 - status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
85 - tx_done = 0;
86 - rx_done = 0;
87 - tx_again = 0;
88 -
89 - if (status & tx_intr)
90 - tx_done = mtk_poll_tx(eth, budget, &tx_again);
91 -
92 - if (status & rx_intr)
93 - rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
94 + u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
95 + u32 status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
96
97 if (unlikely(status2 & status_intr)) {
98 mtk_stats_update(eth);
99 mtk_w32(eth, status_intr, MTK_INT_STATUS2);
100 }
101 +}
102 +
103 +static int mtk_napi_tx(struct napi_struct *napi, int budget)
104 +{
105 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
106 + u32 status, mask;
107 + int tx_done = 0;
108 +
109 + mtk_handle_status_irq(eth);
110
111 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
112 + tx_done = mtk_poll_tx(eth, budget);
113 if (unlikely(netif_msg_intr(eth))) {
114 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
115 - netdev_info(eth->netdev[0],
116 - "done tx %d, rx %d, intr 0x%08x/0x%x\n",
117 - tx_done, rx_done, status, mask);
118 + dev_info(eth->dev,
119 + "done tx %d, intr 0x%08x/0x%x\n",
120 + tx_done, status, mask);
121 }
122
123 - if (tx_again || rx_done == budget)
124 + if (tx_done == budget)
125 return budget;
126
127 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
128 - if (status & (tx_intr | rx_intr))
129 + if (status & MTK_TX_DONE_INT)
130 return budget;
131
132 napi_complete(napi);
133 - mtk_irq_enable(eth, tx_intr | rx_intr);
134 + mtk_irq_enable(eth, MTK_TX_DONE_INT);
135 +
136 + return tx_done;
137 +}
138 +
139 +static int mtk_napi_rx(struct napi_struct *napi, int budget)
140 +{
141 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
142 + u32 status, mask;
143 + int rx_done = 0;
144 +
145 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
146 + rx_done = mtk_poll_rx(napi, budget, eth);
147 + if (unlikely(netif_msg_intr(eth))) {
148 + mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
149 + dev_info(eth->dev,
150 + "done rx %d, intr 0x%08x/0x%x\n",
151 + rx_done, status, mask);
152 + }
153 +
154 + if (rx_done == budget)
155 + return budget;
156 +
157 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
158 + if (status & MTK_RX_DONE_INT)
159 + return budget;
160 +
161 + napi_complete(napi);
162 + mtk_irq_enable(eth, MTK_RX_DONE_INT);
163
164 return rx_done;
165 }
166 @@ -1237,22 +1253,44 @@ static void mtk_tx_timeout(struct net_de
167 schedule_work(&eth->pending_work);
168 }
169
170 -static irqreturn_t mtk_handle_irq(int irq, void *_eth)
171 +static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
172 {
173 struct mtk_eth *eth = _eth;
174 u32 status;
175
176 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
177 + status &= ~MTK_TX_DONE_INT;
178 +
179 if (unlikely(!status))
180 return IRQ_NONE;
181
182 - if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
183 + if (status & MTK_RX_DONE_INT) {
184 if (likely(napi_schedule_prep(&eth->rx_napi)))
185 __napi_schedule(&eth->rx_napi);
186 - } else {
187 - mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
188 + mtk_irq_disable(eth, MTK_RX_DONE_INT);
189 }
190 - mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
191 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
192 +
193 + return IRQ_HANDLED;
194 +}
195 +
196 +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
197 +{
198 + struct mtk_eth *eth = _eth;
199 + u32 status;
200 +
201 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
202 + status &= ~MTK_RX_DONE_INT;
203 +
204 + if (unlikely(!status))
205 + return IRQ_NONE;
206 +
207 + if (status & MTK_TX_DONE_INT) {
208 + if (likely(napi_schedule_prep(&eth->tx_napi)))
209 + __napi_schedule(&eth->tx_napi);
210 + mtk_irq_disable(eth, MTK_TX_DONE_INT);
211 + }
212 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
213
214 return IRQ_HANDLED;
215 }
216 @@ -1265,7 +1303,7 @@ static void mtk_poll_controller(struct n
217 u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
218
219 mtk_irq_disable(eth, int_mask);
220 - mtk_handle_irq(dev->irq, dev);
221 + mtk_handle_irq(dev->irq[0], dev);
222 mtk_irq_enable(eth, int_mask);
223 }
224 #endif
225 @@ -1301,6 +1339,7 @@ static int mtk_open(struct net_device *d
226 if (err)
227 return err;
228
229 + napi_enable(&eth->tx_napi);
230 napi_enable(&eth->rx_napi);
231 mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
232 }
233 @@ -1349,6 +1388,7 @@ static int mtk_stop(struct net_device *d
234 return 0;
235
236 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
237 + napi_disable(&eth->tx_napi);
238 napi_disable(&eth->rx_napi);
239
240 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
241 @@ -1386,7 +1426,11 @@ static int __init mtk_hw_init(struct mtk
242 /* Enable RX VLan Offloading */
243 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
244
245 - err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
246 + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
247 + dev_name(eth->dev), eth);
248 + if (err)
249 + return err;
250 + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
251 dev_name(eth->dev), eth);
252 if (err)
253 return err;
254 @@ -1402,7 +1446,11 @@ static int __init mtk_hw_init(struct mtk
255 mtk_w32(eth, 0, MTK_RST_GL);
256
257 /* FE int grouping */
258 - mtk_w32(eth, 0, MTK_FE_INT_GRP);
259 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
260 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
261 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
262 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
263 + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
264
265 for (i = 0; i < 2; i++) {
266 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
267 @@ -1450,7 +1498,9 @@ static void mtk_uninit(struct net_device
268 phy_disconnect(mac->phy_dev);
269 mtk_mdio_cleanup(eth);
270 mtk_irq_disable(eth, ~0);
271 - free_irq(dev->irq, dev);
272 + free_irq(eth->irq[0], dev);
273 + free_irq(eth->irq[1], dev);
274 + free_irq(eth->irq[2], dev);
275 }
276
277 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
278 @@ -1725,10 +1775,10 @@ static int mtk_add_mac(struct mtk_eth *e
279 dev_err(eth->dev, "error bringing up device\n");
280 goto free_netdev;
281 }
282 - eth->netdev[id]->irq = eth->irq;
283 + eth->netdev[id]->irq = eth->irq[0];
284 netif_info(eth, probe, eth->netdev[id],
285 "mediatek frame engine at 0x%08lx, irq %d\n",
286 - eth->netdev[id]->base_addr, eth->netdev[id]->irq);
287 + eth->netdev[id]->base_addr, eth->irq[0]);
288
289 return 0;
290
291 @@ -1745,6 +1795,7 @@ static int mtk_probe(struct platform_dev
292 struct mtk_soc_data *soc;
293 struct mtk_eth *eth;
294 int err;
295 + int i;
296
297 match = of_match_device(of_mtk_match, &pdev->dev);
298 soc = (struct mtk_soc_data *)match->data;
299 @@ -1780,10 +1831,12 @@ static int mtk_probe(struct platform_dev
300 return PTR_ERR(eth->rstc);
301 }
302
303 - eth->irq = platform_get_irq(pdev, 0);
304 - if (eth->irq < 0) {
305 - dev_err(&pdev->dev, "no IRQ resource found\n");
306 - return -ENXIO;
307 + for (i = 0; i < 3; i++) {
308 + eth->irq[i] = platform_get_irq(pdev, i);
309 + if (eth->irq[i] < 0) {
310 + dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
311 + return -ENXIO;
312 + }
313 }
314
315 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
316 @@ -1824,7 +1877,9 @@ static int mtk_probe(struct platform_dev
317 * for NAPI to work
318 */
319 init_dummy_netdev(&eth->dummy_dev);
320 - netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
321 + netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
322 + MTK_NAPI_WEIGHT);
323 + netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
324 MTK_NAPI_WEIGHT);
325
326 platform_set_drvdata(pdev, eth);
327 @@ -1845,6 +1900,7 @@ static int mtk_remove(struct platform_de
328 clk_disable_unprepare(eth->clk_gp1);
329 clk_disable_unprepare(eth->clk_gp2);
330
331 + netif_napi_del(&eth->tx_napi);
332 netif_napi_del(&eth->rx_napi);
333 mtk_cleanup(eth);
334 platform_set_drvdata(pdev, NULL);
335 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
336 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
337 @@ -68,6 +68,10 @@
338 /* Unicast Filter MAC Address Register - High */
339 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
340
341 +/* PDMA Interrupt grouping registers */
342 +#define MTK_PDMA_INT_GRP1 0xa50
343 +#define MTK_PDMA_INT_GRP2 0xa54
344 +
345 /* QDMA TX Queue Configuration Registers */
346 #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
347 #define QDMA_RES_THRES 4
348 @@ -124,6 +128,11 @@
349 #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
350 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
351
352 +/* QDMA Interrupt grouping registers */
353 +#define MTK_QDMA_INT_GRP1 0x1a20
354 +#define MTK_QDMA_INT_GRP2 0x1a24
355 +#define MTK_RLS_DONE_INT BIT(0)
356 +
357 /* QDMA Interrupt Status Register */
358 #define MTK_QDMA_INT_MASK 0x1A1C
359
360 @@ -355,7 +364,8 @@ struct mtk_rx_ring {
361 * @dma_refcnt: track how many netdevs are using the DMA engine
362 * @tx_ring: Pointer to the memore holding info about the TX ring
363 * @rx_ring: Pointer to the memore holding info about the RX ring
364 - * @rx_napi: The NAPI struct
365 + * @tx_napi: The TX NAPI struct
366 + * @rx_napi: The RX NAPI struct
367 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
368 * @phy_scratch_ring: physical address of scratch_ring
369 * @scratch_head: The scratch memory that scratch_ring points to.
370 @@ -376,7 +386,7 @@ struct mtk_eth {
371 struct net_device dummy_dev;
372 struct net_device *netdev[MTK_MAX_DEVS];
373 struct mtk_mac *mac[MTK_MAX_DEVS];
374 - int irq;
375 + int irq[3];
376 u32 msg_enable;
377 unsigned long sysclk;
378 struct regmap *ethsys;
379 @@ -384,6 +394,7 @@ struct mtk_eth {
380 atomic_t dma_refcnt;
381 struct mtk_tx_ring tx_ring;
382 struct mtk_rx_ring rx_ring;
383 + struct napi_struct tx_napi;
384 struct napi_struct rx_napi;
385 struct mtk_tx_dma *scratch_ring;
386 dma_addr_t phy_scratch_ring;
387 @@ -394,6 +405,7 @@ struct mtk_eth {
388 struct clk *clk_gp2;
389 struct mii_bus *mii_bus;
390 struct work_struct pending_work;
391 +
392 };
393
394 /* struct mtk_mac - the structure that holds the info about the MACs of the