kernel: add Intel/Lantiq VRX518 TC driver
package/kernel/lantiq/vrx518_tc/patches/200-swplat.patch (openwrt/staging/pepe2k.git)
1 The existing software receive and transmit path does not actually work.
2 This replaces it with a basic working implementation.
3
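Besides rewriting the software datapath in sw_plat.c, the patch makes the ACA descriptor byteswap configuration endian-aware and stops reading skb->len after the skb has been handed to tc_ops.send(), which takes ownership of (and may free) the buffer. A minimal sketch of that transmit-statistics pattern, using a simplified, hypothetical private struct (example_priv and its send hook are illustrative only, not part of the driver):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical, simplified private data; stands in for the driver's
     * ptm_priv/atm_priv and its tc_ops.send() hook.
     */
    struct example_priv {
            struct rtnl_link_stats64 stats64;
            int (*send)(struct net_device *dev, struct sk_buff *skb);
    };

    /* Mirrors the fix applied to ptm_xmit()/ppe_send(): cache skb->len before
     * the send hook is called, because the callee takes ownership of the skb
     * (freeing it on both the success and the error path), so reading
     * skb->len afterwards would be a use-after-free.
     */
    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct example_priv *priv = netdev_priv(dev);
            unsigned int len = skb->len;    /* read before ownership is passed */

            if (priv->send(dev, skb) < 0) {
                    priv->stats64.tx_dropped++;
            } else {
                    priv->stats64.tx_packets++;
                    priv->stats64.tx_bytes += len;  /* not skb->len */
            }

            return NETDEV_TX_OK;
    }
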
4 --- a/dcdp/atm_tc.c
5 +++ b/dcdp/atm_tc.c
6 @@ -603,7 +603,11 @@ static void atm_aca_init(struct atm_priv
7 cfg = &priv->tc_priv->cfg;
8
9 txin = &param.aca_txin;
10 +#if defined(__LITTLE_ENDIAN)
11 + txin->byteswap = 0;
12 +#else
13 txin->byteswap = 1;
14 +#endif
15 txin->hd_size_in_dw = cfg->txin.soc_desc_dwsz;
16 txin->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_IN_PD_LIST_BASE);
17 txin->pd_desc_num = __ACA_TX_IN_PD_LIST_NUM;
18 @@ -625,7 +629,11 @@ static void atm_aca_init(struct atm_priv
19 txin->soc_cmlt_cnt_addr);
20
21 txout = &param.aca_txout;
22 +#if defined(__LITTLE_ENDIAN)
23 + txout->byteswap = 0;
24 +#else
25 txout->byteswap = 1;
26 +#endif
27 txout->hd_size_in_dw = cfg->txout.soc_desc_dwsz;
28 txout->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_OUT_PD_LIST_BASE);
29 txout->pd_desc_num = __ACA_TX_OUT_PD_LIST_NUM;
30 @@ -647,7 +655,11 @@ static void atm_aca_init(struct atm_priv
31 txout->soc_cmlt_cnt_addr);
32
33 rxout = &param.aca_rxout;
34 +#if defined(__LITTLE_ENDIAN)
35 + rxout->byteswap = 0;
36 +#else
37 rxout->byteswap = 1;
38 +#endif
39 rxout->hd_size_in_dw = cfg->rxout.soc_desc_dwsz;
40 rxout->pd_desc_base = SB_XBAR_ADDR(__ACA_RX_OUT_PD_LIST_BASE);
41 rxout->pd_desc_num = __ACA_RX_OUT_PD_LIST_NUM;
42 @@ -669,7 +681,11 @@ static void atm_aca_init(struct atm_priv
43 rxout->soc_cmlt_cnt_addr);
44
45 rxin = &param.aca_rxin;
46 +#if defined(__LITTLE_ENDIAN)
47 + rxin->byteswap = 0;
48 +#else
49 rxin->byteswap = 1;
50 +#endif
51 rxin->hd_size_in_dw = cfg->rxin.soc_desc_dwsz;
52 rxin->pd_desc_base = SB_XBAR_ADDR(__RX_IN_PD_DES_LIST_BASE);
53 rxin->pd_desc_num = __ACA_RX_IN_PD_LIST_NUM;
54 @@ -1261,7 +1277,7 @@ static int ppe_ioctl(struct atm_dev *dev
55 static int ppe_send(struct atm_vcc *vcc, struct sk_buff *skb)
56 {
57 int ret, qid, mpoa_pt, mpoa_type, vid;
58 - unsigned int prio, conn;
59 + unsigned int prio, conn, len;
60 struct atm_priv *priv;
61
62 if (!vcc) {
63 @@ -1327,12 +1343,14 @@ static int ppe_send(struct atm_vcc *vcc,
64 tc_dbg(priv->tc_priv, MSG_TX, "vid: 0x%x, qid: 0x%x\n",
65 vid, qid);
66
67 + len = skb->len;
68 +
69 if (priv->tc_priv->tc_ops.send(NULL,
70 skb, qid, ATM_SL_PKT) == 0) {
71 priv->stats.aal5_tx_pkts++;
72 - priv->stats.aal5_tx_bytes += skb->len;
73 + priv->stats.aal5_tx_bytes += len;
74 priv->conn[conn].stats.aal5_tx_pkts++;
75 - priv->conn[conn].stats.aal5_tx_bytes += skb->len;
76 + priv->conn[conn].stats.aal5_tx_bytes += len;
77 priv->conn[conn].prio_tx_packets[prio]++;
78 } else {
79 tc_dbg(priv->tc_priv, MSG_TX, "ATM: TX fail\n");
80 --- a/dcdp/ptm_tc.c
81 +++ b/dcdp/ptm_tc.c
82 @@ -497,6 +497,7 @@ static int ptm_xmit(struct sk_buff *skb,
83 struct ptm_priv *ptm_tc = netdev_priv(dev);
84 int qid;
85 enum tc_pkt_type type;
86 + unsigned int len;
87
88 if (!showtime_stat(ptm_tc->tc_priv))
89 goto PTM_XMIT_DROP;
90 @@ -510,11 +511,13 @@ static int ptm_xmit(struct sk_buff *skb,
91 type = ptm_tc->tc_priv->tc_mode == TC_PTM_BND_MODE
92 ? PTM_BOND_PKT : PTM_SL_PKT;
93
94 + len = skb->len;
95 +
96 if (ptm_tc->tc_priv->tc_ops.send(dev, skb, qid, type) < 0)
97 ptm_tc->stats64.tx_dropped++;
98 else {
99 ptm_tc->stats64.tx_packets++;
100 - ptm_tc->stats64.tx_bytes += skb->len;
101 + ptm_tc->stats64.tx_bytes += len;
102 }
103
104 return 0;
105 @@ -631,7 +634,7 @@ static int ptm_dev_init(struct tc_priv *
106 const char macaddr[ETH_ALEN]
107 = {0xAC, 0x9A, 0x96, 0x11, 0x22, 0x33};
108
109 - dev = alloc_netdev_mq(sizeof(*ptm_tc), "dsl%d", NET_NAME_ENUM, ptm_setup, 4);
110 + dev = alloc_netdev(sizeof(*ptm_tc), "dsl%d", NET_NAME_ENUM, ptm_setup);
111 if (!dev) {
112 tc_dbg(tc_priv, MSG_INIT, "Cannot alloc net device\n");
113 return -ENOMEM;
114 @@ -2324,7 +2327,11 @@ static void ptm_aca_init(struct ptm_ep_p
115 cfg = &priv->tc_priv->cfg;
116
117 txin = &param.aca_txin;
118 +#if defined(__LITTLE_ENDIAN)
119 + txin->byteswap = 0;
120 +#else
121 txin->byteswap = 1;
122 +#endif
123 txin->hd_size_in_dw = cfg->txin.soc_desc_dwsz;
124 txin->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_IN_PD_LIST_BASE);
125 txin->pd_desc_num = __ACA_TX_IN_PD_LIST_NUM;
126 @@ -2347,7 +2354,11 @@ static void ptm_aca_init(struct ptm_ep_p
127 txin->soc_cmlt_cnt_addr);
128
129 txout = &param.aca_txout;
130 +#if defined(__LITTLE_ENDIAN)
131 + txout->byteswap = 0;
132 +#else
133 txout->byteswap = 1;
134 +#endif
135 txout->hd_size_in_dw = cfg->txout.soc_desc_dwsz;
136 if (priv->tc_priv->param.cdma_desc_loc == LOC_IN_FPI)
137 txout->pd_desc_base = sb_r32(__TX_OUT_SHADOW_PTR) - phybase;
138 @@ -2373,7 +2384,11 @@ static void ptm_aca_init(struct ptm_ep_p
139 txout->soc_cmlt_cnt_addr);
140
141 rxout = &param.aca_rxout;
142 +#if defined(__LITTLE_ENDIAN)
143 + rxout->byteswap = 0;
144 +#else
145 rxout->byteswap = 1;
146 +#endif
147 rxout->hd_size_in_dw = cfg->rxout.soc_desc_dwsz;
148 if (priv->tc_priv->param.cdma_desc_loc == LOC_IN_FPI)
149 rxout->pd_desc_base = sb_r32(__RX_OUT_SHADOW_PTR) - phybase;
150 @@ -2399,7 +2414,11 @@ static void ptm_aca_init(struct ptm_ep_p
151 rxout->soc_cmlt_cnt_addr);
152
153 rxin = &param.aca_rxin;
154 +#if defined(__LITTLE_ENDIAN)
155 + rxin->byteswap = 0;
156 +#else
157 rxin->byteswap = 1;
158 +#endif
159 rxin->hd_size_in_dw = cfg->rxin.soc_desc_dwsz;
160 rxin->pd_desc_base = SB_XBAR_ADDR(__RX_IN_PD_DES_LIST_BASE);
161 rxin->pd_desc_num = __ACA_RX_IN_PD_LIST_NUM;
162 --- a/dcdp/platform/sw_plat.c
163 +++ b/dcdp/platform/sw_plat.c
164 @@ -36,10 +36,13 @@
165 #include <linux/printk.h>
166 #include <linux/etherdevice.h>
167 #include <linux/workqueue.h>
168 -#include "inc/dsl_tc.h"
169 +#include "../inc/dsl_tc.h"
170
171 #include "../inc/tc_main.h"
172 #include "../inc/reg_addr.h"
173 +#include "../inc/tc_common.h"
174 +
175 +#include "../inc/fw/vrx518_addr_def.h"
176
177
178 #define PMAC_SIZE 8
179 @@ -70,7 +73,7 @@ enum {
180 #define TXIN_DNUM 128
181 #define TXOUT_DNUM 128
182 #define RXOUT_DNUM 1024
183 -#define RXIN_DNUM 1024
184 +#define RXIN_DNUM 0
185
186 #define TXIN_CHECK_NUM 32
187
188 @@ -80,22 +83,32 @@ struct aca_ring {
189 void *umt_dst;
190 u32 umt_phydst;
191 u32 dnum;
192 + u32 dsize;
193 int idx; /* SoC RX/TX index */
194 - int cnt;
195 - void *cnt_addr;
196 - u32 cnt_phyaddr;
197 int ep_dev_idx;
198 };
199
200 +struct tx_list_item {
201 + size_t len;
202 + void *buf;
203 + dma_addr_t phyaddr;
204 +};
205 +
206 +struct tx_list {
207 + struct tx_list_item *data;
208 + u32 dnum;
209 +};
210 +
211 struct aca_ring_grp {
212 struct aca_ring rxin;
213 struct aca_ring txin;
214 struct aca_ring rxout;
215 struct aca_ring txout;
216 + struct tx_list txlist;
217 };
218
219 -#if 1
220 -struct dma_desc {
221 +#if defined(__LITTLE_ENDIAN)
222 +struct dma_tx_desc {
223 /* DW 0 */
224 u32 qid;
225 /* DW 1 */
226 @@ -112,8 +125,26 @@ struct dma_desc {
227 u32 c:1;
228 u32 own:1;
229 }__packed;
230 +
231 +struct dma_rx_desc {
232 + /* DW 0 */
233 + u32 qid;
234 + /* DW 1 */
235 + u32 res2;
236 + /* DW 2 */
237 + u32 data_len:16;
238 + u32 res0:7;
239 + u32 byte_off:3;
240 + u32 res1:2;
241 + u32 eop:1;
242 + u32 sop:1;
243 + u32 c:1;
244 + u32 own:1;
245 + /* DW 3 */
246 + u32 data_ptr;
247 +}__packed;
248 #else
249 -struct dma_desc {
250 +struct dma_tx_desc {
251 /* DW 0 */
252 u32 qid;
253 /* DW 1 */
254 @@ -131,14 +162,25 @@ struct dma_desc {
255 u32 data_len:16;
256 }__packed;
257
258 +struct dma_rx_desc {
259 + /* DW 0 */
260 + u32 qid;
261 + /* DW 1 */
262 + u32 res;
263 + /* DW 2 */
264 + u32 own:1;
265 + u32 c:1;
266 + u32 sop:1;
267 + u32 eop:1;
268 + u32 res1:2;
269 + u32 byte_off:3;
270 + u32 res0:7;
271 + u32 data_len:16;
272 + /* DW 3 */
273 + u32 data_ptr;
274 +}__packed;
275 #endif
276
277 -struct plat_dma {
278 - u32 chan; /* CHAN IID */
279 - u32 dma_chan; /* CONTROLLER/PORT/CHAN ID */
280 - u32 ds_dnum; /* DS descriptor number */
281 -};
282 -
283 struct plat_umt {
284 u32 id;
285 u32 cbm_id;
286 @@ -152,28 +194,28 @@ struct tc_req {
287 enum dsl_tc_mode tc_mode;
288 };
289
290 -#if 0
291 -struct tc_coc {
292 - enum ltq_cpufreq_state coc_stat;
293 - struct tasklet_struct coc_task;
294 +struct mem_map_entry {
295 + dma_addr_t phyaddr;
296 + void *mem;
297 + size_t size;
298 + struct hlist_node node;
299 };
300 -#endif
301
302 struct plat_priv {
303 struct tc_priv *tc_priv;
304 struct plat_umt umt[EP_MAX_NUM];
305 - struct plat_dma dma[EP_MAX_NUM];
306 struct ltq_mei_atm_showtime_info dsl_ops;
307 struct tc_req req_work;
308 struct aca_ring_grp soc_rings;
309 - /* struct tc_coc coc;*/
310 + struct net_device *netdev;
311 + DECLARE_HASHTABLE(mem_map, 8);
312 };
313
314 static struct plat_priv *g_plat_priv;
315 struct tasklet_struct txout_task;
316 struct tasklet_struct rxout_task;
317
318 -static void txout_action(struct tc_priv *priv, struct aca_ring *txout);
319 +static DEFINE_SPINLOCK(tx_spinlock);
320
321 void *ppa_callback_get(e_ltq_mei_cb_type type)
322 {
323 @@ -259,122 +301,65 @@ static inline struct tc_priv *plat_to_tc
324 return g_plat_priv->tc_priv;
325 }
326
327 -static int32_t plat_rx(struct net_device *rxdev, struct net_device *txdev,
328 - struct sk_buff *skb, int32_t len)
329 -{
330 - int32_t err;
331 - struct tc_priv *tc_priv = plat_to_tcpriv();
332 -
333 - if (unlikely(!rxdev)) {
334 - if (txdev != NULL)
335 - tc_dbg(tc_priv, MSG_RX,
336 - "Recv undelivered packet from DP lib\n");
337 - else
338 - tc_dbg(tc_priv, MSG_RX, "Recv unknown packet\n");
339 - err = -ENODEV;
340 - goto err1;
341 - }
342 -
343 - tc_priv->tc_ops.recv(rxdev, skb);
344 - return 0;
345 -
346 -err1:
347 - dev_kfree_skb_any(skb);
348 -
349 - return err;
350 -}
351 -
352 -#if 0
353 -static int32_t plat_get_subifid(struct net_device *dev, struct sk_buff *skb,
354 - void *subif_data, uint8_t dst_mac[MAX_ETH_ALEN],
355 - dp_subif_t *subif, uint32_t flags)
356 -{
357 - int qid;
358 - struct tc_priv *priv = plat_to_tcpriv();
359 -
360 - qid = priv->tc_ops.get_qid(dev, skb, subif_data, flags);
361 - if (qid < 0)
362 - return qid;
363 - else
364 - subif->subif = qid;
365 -
366 - return 0;
367 -}
368 -#endif
369 -
370 -#if 0
371 -static void plat_coc_tasklet(unsigned long arg)
372 -{
373 - /* change state to D0 */
374 - if (g_plat_priv->coc.coc_stat == LTQ_CPUFREQ_PS_D0)
375 - return;
376 -
377 - g_plat_priv->coc.coc_stat = LTQ_CPUFREQ_PS_D0;
378 -}
379 -
380 -static void plat_coc_req(void)
381 -{
382 - tasklet_schedule(&g_plat_priv->coc.coc_task);
383 -}
384 +static void *plat_mem_alloc(size_t size, enum tc_dir dir, u32 *phyaddr);
385 +static void *plat_mem_virt(u32 phyaddr);
386 +static void plat_mem_free(u32 phyaddr, enum tc_dir dir);
387
388 +static void txlist_free(struct tx_list *list);
389
390 -static int32_t plat_coc_stat(enum ltq_cpufreq_state new_state,
391 - enum ltq_cpufreq_state old_state, uint32_t flags)
392 +static int txlist_init(struct tx_list *list, u32 dnum)
393 {
394 - struct tc_priv *priv = plat_to_tcpriv();
395 - tc_dbg(priv, MSG_COC,
396 - "COC current state: %d, new state: %d, old state: %d\n",
397 - g_plat_priv->coc.coc_stat, new_state, old_state);
398 + struct tx_list_item *item;
399 + int i;
400
401 - if (g_plat_priv->coc.coc_stat != new_state) {
402 - g_plat_priv->coc.coc_stat = new_state;
403 + list->dnum = dnum;
404
405 - if (new_state == LTQ_CPUFREQ_PS_D3) {
406 - /* Enable interrupt for DS packet */
407 - priv->tc_ops.irq_on(MBOX_PKT_RX);
408 - } else {
409 - /* Disable interrupt for DS packet */
410 - priv->tc_ops.irq_off(MBOX_PKT_RX);
411 + list->data = kcalloc(dnum, sizeof(struct tx_list_item), GFP_KERNEL);
412 + if (!list->data) {
413 + pr_err("Failed to allocate TX list!\n");
414 + goto err;
415 + }
416 +
417 + for (i = 0; i < list->dnum; i++) {
418 + item = &list->data[i];
419 +
420 + // use plat_mem_alloc as these buffers will be mixed with buffers allocated in ptm_tc.c / atm_tc.c
421 + item->buf = plat_mem_alloc(DMA_PACKET_SZ, US_DIR, &item->phyaddr);
422 + if (!item->buf) {
423 + pr_err("Failed to allocate TX buffer!\n");
424 + goto err;
425 }
426 }
427
428 return 0;
429 -}
430 -#endif
431 -
432 -static inline int ring_dist(int idx1, int idx2, int size)
433 -{
434 - if (idx1 >= idx2)
435 - return (idx1 - idx2);
436 - else
437 - return (idx1 + size - idx2);
438 -}
439
440 -static inline int __ring_full(int idx, int cnt, u32 dnum)
441 -{
442 - if (ring_dist(idx, cnt, dnum) < dnum - 1)
443 - return 0;
444 - else
445 - return 1;
446 +err:
447 + txlist_free(list);
448 + return -1;
449 }
450
451 -static inline int ring_full(struct aca_ring *ring)
452 +static void txlist_free(struct tx_list *list)
453 {
454 - if (!__ring_full(ring->idx, ring->cnt, ring->dnum))
455 - return 0;
456 + struct tx_list_item *item;
457 + int i;
458
459 - /* if ring full, update cumulative counter and check again */
460 - ring->cnt = readl(ring->cnt_addr) % ring->dnum;
461 + if (list->data) {
462 + for (i = 0; i < list->dnum; i++) {
463 + item = &list->data[i];
464 +
465 + if (item->buf) {
466 + // use plat_mem_free as these buffers are mixed with buffers allocated in ptm_tc.c / atm_tc.c
467 + plat_mem_free(item->phyaddr, US_DIR);
468 + }
469 + }
470 + }
471
472 - return __ring_full(ring->idx, ring->cnt, ring->dnum);
473 + kfree(list->data);
474 }
475
476 -#define ring_idx_inc(ring, idx) \
477 - do { ring->idx = (ring->idx + 1) % ring->dnum; } while (0);
478 -
479 -static inline void ring_cnt_update(struct aca_ring *ring)
480 +static inline void ring_idx_inc(struct aca_ring *ring)
481 {
482 - ring->cnt = readl(ring->cnt_addr) % ring->dnum;
483 + ring->idx = (ring->idx + 1) % ring->dnum;
484 }
485
486 static struct sk_buff *txin_skb_prepare(struct sk_buff *skb)
487 @@ -399,252 +384,220 @@ static struct sk_buff *txin_skb_prepare(
488 return nskb;
489 }
490
491 -static int ring_mmap(void *mem, int size,
492 - enum dma_data_direction dir, u32 *addr)
493 -{
494 - struct device *pdev;
495 - dma_addr_t phy_addr;
496 - struct tc_priv *priv;
497 - u32 addr1;
498 -
499 - priv = g_plat_priv->tc_priv;
500 - pdev = priv->ep_dev[0].dev;
501 -
502 - phy_addr = dma_map_single(pdev, mem, size, dir);
503 - if (unlikely(dma_mapping_error(pdev, phy_addr))) {
504 - tc_err(priv, MSG_INIT,
505 - "DMA address mapping error: buf: 0x%x, size: %d, dir: %d\n",
506 - (u32)mem, size, dir);
507 - return -ENOMEM;
508 - }
509 - dma_unmap_single(pdev, phy_addr, size, dir);
510 -
511 - pr_info("vaddr: 0x%x, phyaddr: 0x%lx\n", (u32)mem, phy_addr);
512 - addr1 = (u32)phy_addr;
513 -
514 - if (addr)
515 - *addr = addr1;
516 -
517 - return 0;
518 -}
519 -
520 -static void txin_action(struct tc_priv *priv, struct aca_ring *txin,
521 +static int txin_action(struct tc_priv *priv, struct aca_ring *txin,
522 struct sk_buff *skb, int qid, enum tc_pkt_type type)
523 {
524 - struct dma_desc *desc, desc1;
525 - u32 phyaddr, *dst, *src;
526 - int i;
527 + struct device *pdev = priv->ep_dev[0].dev;
528 + struct aca_ring *txout = &g_plat_priv->soc_rings.txout;
529 + struct tx_list *txlist = &g_plat_priv->soc_rings.txlist;
530 + struct dma_tx_desc *desc;
531 + struct tx_list_item *txlist_item;
532 + unsigned long flags;
533 +
534 + if (!g_plat_priv->netdev) {
535 + spin_lock_irqsave(&tx_spinlock, flags);
536 + }
537
538 - if (ring_full(txin)) {
539 - tc_dbg(priv, MSG_TX,
540 - "TXIN Ring Full!: idx: %d, cnt: %d\n",
541 - txin->idx, txin->cnt);
542 + if ((txin->idx + 2) % txin->dnum == txout->idx) {
543 + if (g_plat_priv->netdev) {
544 + netif_stop_queue(g_plat_priv->netdev);
545 + }
546 + } else if ((txin->idx + 1) % txin->dnum == txout->idx) {
547 + tc_err(priv, MSG_TX, "TXIN ring full: txin: %d, txout: %d\n",
548 + txin->idx, txout->idx);
549 goto err1;
550 }
551
552 + desc = (struct dma_tx_desc *)txin->dbase_mem;
553 + desc += txin->idx;
554 +
555 + txlist_item = &txlist->data[txin->idx];
556 +
557 skb = txin_skb_prepare(skb);
558 if (!skb)
559 - return;
560 + goto err2;
561
562 - if (ring_mmap(skb->data, skb->len, DMA_TO_DEVICE, &phyaddr) < 0) {
563 - tc_err(priv, MSG_TX, "TXIN data mmap failed: 0x%x\n",
564 - (unsigned int)skb->data);
565 - goto err1;
566 - }
567 + /*
568 + * Copy the data to a buffer in the driver. This is necessary because there doesn't seem to be a timely signal
569 +	 * from the device when it has consumed a buffer, which would allow it to be freed safely. The data_ptr is only
570 + * returned in TXOUT after another fixed number of packets (depending on the size of internal buffers) has been
571 + * transmitted, which may not happen in the near future. Making a copy allows to free the SKB here.
572 +	 * transmitted, which may not happen in the near future. Making a copy allows the SKB to be freed here.
573 + memcpy(txlist_item->buf, skb->data, skb->len);
574
575 - /* init a new descriptor for the new skb */
576 - desc = (struct dma_desc *)txin->dbase_mem;
577 - desc += txin->idx;
578 + dma_sync_single_range_for_device(pdev, txlist_item->phyaddr, 0, skb->len, DMA_TO_DEVICE);
579
580 - memset(desc, 0, sizeof(*desc));
581 - memset(&desc1, 0, sizeof(desc1));
582 - desc1.own = 1;
583 - desc1.c = 1;
584 - desc1.sop = 1;
585 - desc1.eop = 1;
586 - desc1.byte_off = phyaddr & 0x7;
587 - desc1.data_len = skb->len;
588 -
589 - desc1.data_ptr = phyaddr & (~(0x7));
590 - desc1.qid = qid;
591 -
592 - dst = (u32 *)desc;
593 - src = (u32 *)&desc1;
594 - for (i = 0; i < DW_SZ(desc1); i++)
595 - dst[i] = cpu_to_be32(src[i]);
596 -
597 - pr_info("txin idx: %d\n", txin->idx);
598 - pr_info("descriptor dst val:(DW0-DW3): 0x%x, 0x%x, 0x%x, 0x%x\n",
599 - dst[0], dst[1], dst[2], dst[3]);
600 - pr_info("descriptor src val: (DW0-DW3): 0x%x, 0x%x, 0x%x, 0x%x\n",
601 - src[0], src[1], src[2], src[3]);
602 -
603 - if (ring_mmap(desc, sizeof(*desc), DMA_TO_DEVICE, NULL) < 0) {
604 - tc_err(priv, MSG_TX, "TXIN descriptor mmap failed: 0x%x\n",
605 - (unsigned int)desc);
606 + // this should never happen, the buffers are already aligned by kmalloc
607 + if (WARN_ON((txlist_item->phyaddr & 0x7) != 0))
608 goto err1;
609 +
610 + if (g_plat_priv->netdev) {
611 + netdev_sent_queue(g_plat_priv->netdev, skb->len);
612 }
613 + txlist_item->len = skb->len;
614 +
615 + memset(desc, 0, sizeof(*desc));
616
617 - ring_idx_inc(txin, idx);
618 + desc->data_ptr = txlist_item->phyaddr;
619 + desc->byte_off = 0;
620 + desc->data_len = skb->len;
621 + desc->qid = qid;
622 +
623 + desc->sop = 1;
624 + desc->eop = 1;
625 + desc->c = 0;
626 + desc->own = 1;
627 +
628 + dev_consume_skb_any(skb);
629 +
630 + ring_idx_inc(txin);
631
632 /* update TXIN UMT by 1 */
633 writel(1, txin->umt_dst);
634 - pr_info("TXIN send txin packet 1 packet\n");
635
636 - /* Free skb */
637 - dev_kfree_skb_any(skb);
638 + if (!g_plat_priv->netdev) {
639 + spin_unlock_irqrestore(&tx_spinlock, flags);
640 + }
641
642 - /* check txout for testing*/
643 - //txout_action(plat_to_tcpriv(), &g_plat_priv->soc_rings.txout);
644 - return;
645 + return 0;
646
647 err1:
648 - //skb->delay_free = 0;
649 dev_kfree_skb_any(skb);
650 +
651 +err2:
652 + if (!g_plat_priv->netdev) {
653 + spin_unlock_irqrestore(&tx_spinlock, flags);
654 + }
655 +
656 + return -1;
657 }
658
659 static void txout_action(struct tc_priv *priv, struct aca_ring *txout)
660 {
661 - int i, cnt;
662 - struct dma_desc *desc;
663 - u32 ptr;
664 - void *mem;
665 -
666 - ring_cnt_update(txout);
667 - cnt = ring_dist(txout->idx, txout->cnt, txout->dnum);
668 + struct aca_ring *txin = &g_plat_priv->soc_rings.txin;
669 + struct tx_list *txlist = &g_plat_priv->soc_rings.txlist;
670 + struct tx_list_item *txlist_item;
671 + int i, cnt, bytes;
672 + u32 *desc;
673 + unsigned long flags;
674 +
675 + cnt = 0;
676 + bytes = 0;
677 +
678 + if (g_plat_priv->netdev) {
679 + netif_tx_lock(g_plat_priv->netdev);
680 + } else {
681 + spin_lock_irqsave(&tx_spinlock, flags);
682 + }
683
684 - for (i = 0; i < cnt; i++) {
685 + for (i = 0; i < txout->dnum; i++) {
686 desc = txout->dbase_mem;
687 desc += txout->idx;
688 - /* read from memory */
689 - if (ring_mmap(desc, sizeof(*desc), DMA_FROM_DEVICE, NULL) < 0) {
690 - tc_err(priv, MSG_TX,
691 - "map TXOUT DMA descriptor failed\n");
692 - continue;
693 +
694 + // *desc seems to be a pointer to a QoSQ buffer or the data_ptr of some previously sent packet
695 + if (*desc == 0) {
696 + break;
697 }
698 - ptr = desc->data_ptr + desc->byte_off;
699 - mem = (void * __force)__va(ptr);
700 - kfree(mem);
701 - ring_idx_inc(txout, idx);
702 - }
703
704 - if (cnt)
705 - writel(cnt, txout->umt_dst);
706 - pr_info("TXOUT received %d descriptors\n", cnt);
707 -}
708 + if (txout->idx == txin->idx) {
709 + tc_err(priv, MSG_TX, "TXOUT unexpected non-zero descriptor: txin: %d, txout: %d\n",
710 + txin->idx, txout->idx);
711 + break;
712 + }
713
714 -static void rxin_action(struct tc_priv *priv,
715 - struct aca_ring *rxin, int size, int cnt)
716 -{
717 - int i, dist;
718 - struct dma_desc *desc;
719 - void *data_ptr;
720 - u32 phyaddr;
721 -
722 - if (ring_full(rxin)) {
723 - tc_dbg(priv, MSG_RX,
724 - "RXIN Ring Full!: idx: %d, cnt: %d\n",
725 - rxin->idx, rxin->cnt);
726 - return;
727 - }
728 + txlist_item = &txlist->data[txout->idx];
729
730 - dist = ring_dist(rxin->idx, rxin->cnt, rxin->dnum);
731 - if (cnt > dist) {
732 - WARN_ONCE(1, "RXIN NO enough room for free buffers: free: %d, room: %d\n",
733 - cnt, dist);
734 - cnt = dist;
735 + cnt++;
736 + bytes += txlist_item->len;
737 +
738 + /*
739 + * Reuse the returned buffer. The previous buffer should still be referenced by another descriptor.
740 + * When the driver is unloaded, all buffers in the txlist as well as those referenced by the
741 + * descriptors managed in ptm_tc.c or atm_tc.c will be freed.
742 + */
743 + txlist_item->buf = plat_mem_virt(*desc);
744 + txlist_item->phyaddr = *desc;
745 +
746 + *desc = 0;
747 +
748 + ring_idx_inc(txout);
749 }
750
751 - for (i = 0; i < cnt; i++) {
752 - data_ptr = kmalloc(size, GFP_ATOMIC);
753 - if (!data_ptr) {
754 - tc_err(priv, MSG_RX,
755 - "RXIN kmalloc data buffer failed: %d\n", size);
756 - goto err1;
757 - }
758 + if (cnt) {
759 + writel(cnt, txout->umt_dst+0x28); // TXOUT_HD_ACCUM_SUB instead of TXOUT_HD_ACCUM_ADD
760
761 - if (ring_mmap(data_ptr, size, DMA_FROM_DEVICE, &phyaddr) < 0) {
762 - tc_err(priv, MSG_RX,
763 - "RXIN kmalloc data buffer failed: %d\n", size);
764 - goto err2;
765 + if (g_plat_priv->netdev) {
766 + netdev_completed_queue(g_plat_priv->netdev, cnt, bytes);
767 }
768 + }
769
770 - desc = (struct dma_desc *)rxin->dbase_mem;
771 - desc += rxin->idx;
772 - memset(desc, 0, sizeof(*desc));
773 -
774 - desc->data_len = size;
775 - desc->byte_off = phyaddr & 0x7;
776 - desc->eop = 1;
777 - desc->sop = 1;
778 - desc->own = 1;
779 -
780 - desc->data_ptr = phyaddr;
781 -
782 -
783 - if (ring_mmap(desc, sizeof(*desc), DMA_TO_DEVICE, NULL) < 0) {
784 - tc_err(priv, MSG_RX, "RXIN descriptor mmap failed: 0x%x\n",
785 - (unsigned int)desc);
786 - goto err2;
787 - }
788 -
789 - ring_idx_inc(rxin, idx);
790 + if (g_plat_priv->netdev) {
791 + netif_tx_unlock(g_plat_priv->netdev);
792 + } else {
793 + spin_unlock_irqrestore(&tx_spinlock, flags);
794 }
795
796 - /* update RXIN UMT*/
797 - writel(i, rxin->umt_dst);
798 - pr_info("rxin refill %d descriptors\n", i);
799 - return;
800 + if (cnt && g_plat_priv->netdev && netif_queue_stopped(g_plat_priv->netdev)) {
801 + netif_wake_queue(g_plat_priv->netdev);
802 + }
803 +}
804
805 -err2:
806 - kfree(data_ptr);
807 -err1:
808 - if (i)
809 - writel(i, rxin->umt_dst);
810 - return;
811 +static void rxin_action(struct tc_priv *priv,
812 + struct aca_ring *rxin, int size, int cnt)
813 +{
814 + /* update RXIN UMT*/
815 + writel(cnt, rxin->umt_dst);
816 }
817
818 static int rxout_action(struct tc_priv *priv, struct aca_ring *rxout)
819 {
820 + struct device *pdev = priv->ep_dev[0].dev;
821 int i, cnt;
822 - struct dma_desc *desc;
823 - u32 ptr;
824 - void *mem;
825 + struct dma_rx_desc *desc;
826 + dma_addr_t phyaddr;
827 + void *ptr, *dst;
828 + size_t len;
829 struct sk_buff *skb;
830
831 - ring_cnt_update(rxout);
832 - cnt = ring_dist(rxout->idx, rxout->cnt, rxout->dnum);
833 -
834 - for (i = 0; i < cnt; i++) {
835 + cnt = 0;
836 + for (i = 0; i < rxout->dnum; i++) {
837 desc = rxout->dbase_mem;
838 desc += rxout->idx;
839
840 - /* read from memory */
841 - if (ring_mmap(desc, sizeof(*desc), DMA_FROM_DEVICE, NULL) < 0) {
842 - tc_err(priv, MSG_RX,
843 - "map RXOUT DMA descriptor failed\n");
844 - continue;
845 + if (!desc->own) {
846 + break;
847 }
848 - ptr = desc->data_ptr + desc->byte_off;
849 - mem = __va(ptr);
850 - skb = build_skb(mem, 0);
851 - if (!skb) {
852 - tc_err(priv, MSG_RX,
853 - "RXOUT build skb failed\n");
854 - kfree(mem);
855 - continue;
856 +
857 + // this seems to be a pointer to a DS PKT buffer
858 + phyaddr = desc->data_ptr + desc->byte_off;
859 + ptr = plat_mem_virt(phyaddr);
860 +
861 + len = desc->data_len;
862 +
863 + dma_sync_single_range_for_cpu(pdev, phyaddr, 0, len, DMA_FROM_DEVICE);
864 +
865 + skb = netdev_alloc_skb(g_plat_priv->netdev, len);
866 + if (unlikely(!skb)) {
867 + tc_err(priv, MSG_RX, "RXOUT SKB allocation failed\n");
868 + break;
869 }
870 - priv->tc_ops.recv(NULL, skb);
871 - ring_idx_inc(rxout, idx);
872 +
873 + dst = skb_put(skb, len);
874 + memcpy(dst, ptr, len);
875 +
876 + priv->tc_ops.recv(g_plat_priv->netdev, skb);
877 +
878 + desc->own = 0;
879 +
880 + cnt++;
881 + ring_idx_inc(rxout);
882 }
883
884 if (!cnt)
885 - tc_err(priv, MSG_RX, "RXOUT dummy interrupt: dbase: 0x%x, idx: %d, cnt: %d\n",
886 - (unsigned int)rxout->dbase_mem, rxout->idx, rxout->cnt);
887 + tc_err(priv, MSG_RX, "RXOUT spurious interrupt\n");
888 else
889 - writel(cnt, rxout->umt_dst);
890 + writel(cnt, rxout->umt_dst+0x28); // RXOUT_HD_ACCUM_SUB instead of RXOUT_HD_ACCUM_ADD
891
892 - pr_info("txout received %d packets\n", cnt);
893 return cnt;
894 }
895
896 @@ -669,7 +622,6 @@ static void plat_rxout_tasklet(unsigned
897 struct aca_ring *rxin = &priv->soc_rings.rxin;
898 struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[rxout->ep_dev_idx];
899 int cnt;
900 -
901
902 cnt = rxout_action(tcpriv, rxout);
903 if (cnt)
904 @@ -687,68 +639,144 @@ static int plat_send(struct net_device *
905 {
906 struct plat_priv *priv = g_plat_priv;
907 struct aca_ring *txin = &priv->soc_rings.txin;
908 + int res;
909
910 - txin_action(priv->tc_priv, txin, skb, qid, type);
911 + res = txin_action(priv->tc_priv, txin, skb, qid, type);
912
913 - return 0;
914 + return res;
915 +}
916 +
917 +static void plat_mem_init(void)
918 +{
919 + struct plat_priv *priv = g_plat_priv;
920 +
921 + hash_init(priv->mem_map);
922 }
923
924 /* return virtual address */
925 -static void *plat_mem_alloc(size_t size, enum tc_dir dir)
926 +static void *plat_mem_alloc(size_t size, enum tc_dir dir, u32 *phyaddr)
927 {
928 - return kmalloc(size, GFP_KERNEL);
929 + struct plat_priv *priv = g_plat_priv;
930 + struct tc_priv *tcpriv = priv->tc_priv;
931 + struct device *pdev = tcpriv->ep_dev[0].dev;
932 + enum dma_data_direction dma_dir;
933 + struct mem_map_entry *entry;
934 +
935 + entry = kzalloc(sizeof(struct mem_map_entry), GFP_KERNEL);
936 + if (!entry)
937 + goto err;
938 +
939 + entry->size = size;
940 +
941 + entry->mem = kmalloc(size, GFP_KERNEL);
942 + if (!entry->mem)
943 + goto err_alloc;
944 +
945 + dma_dir = (dir == DS_DIR) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
946 +
947 + entry->phyaddr = dma_map_single(pdev, entry->mem, entry->size, dma_dir);
948 + if (unlikely(dma_mapping_error(pdev, entry->phyaddr))) {
949 +		tc_err(tcpriv, MSG_INIT,
950 + "plat_mem_alloc: DMA mapping error: buf: 0x%x, size: %d, dir: %d\n",
951 + (u32)entry->mem, size, dir);
952 +
953 + goto err_map;
954 + }
955 +
956 + hash_add(g_plat_priv->mem_map, &entry->node, entry->phyaddr);
957 +
958 + *phyaddr = entry->phyaddr;
959 + return entry->mem;
960 +
961 +err_map:
962 + kfree(entry->mem);
963 +
964 +err_alloc:
965 + kfree(entry);
966 +
967 +err:
968 + return NULL;
969 }
970
971 -static void plat_mem_free(u32 phy_addr, enum tc_dir dir)
972 +static void *plat_mem_virt(u32 phyaddr)
973 {
974 - void *mem;
975 + struct mem_map_entry *entry;
976 +
977 + hash_for_each_possible(g_plat_priv->mem_map, entry, node, phyaddr)
978 + if (entry->phyaddr == phyaddr)
979 + return entry->mem;
980 +
981 + WARN_ON(1);
982 + return NULL;
983 +}
984 +
985 +static struct mem_map_entry *plat_mem_entry(u32 phyaddr)
986 +{
987 + struct mem_map_entry *entry;
988 +
989 + hash_for_each_possible(g_plat_priv->mem_map, entry, node, phyaddr)
990 + if (entry->phyaddr == phyaddr)
991 + return entry;
992
993 - mem = (void * __force)__va(phy_addr);
994 - kfree(mem);
995 + return NULL;
996 }
997
998 -static void aca_soc_ring_init(struct tc_priv *priv,
999 - struct aca_ring *ring, u32 dnum, u32 dsize)
1000 +static void plat_mem_free(u32 phyaddr, enum tc_dir dir)
1001 {
1002 + struct tc_priv *priv = g_plat_priv->tc_priv;
1003 + struct device *pdev = priv->ep_dev[0].dev;
1004 + enum dma_data_direction dma_dir;
1005 + struct mem_map_entry *entry;
1006 +
1007 + entry = plat_mem_entry(phyaddr);
1008 + if (WARN_ON(!entry))
1009 + return;
1010 +
1011 + hash_del(&entry->node);
1012 +
1013 + dma_dir = (dir == DS_DIR) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1014 + dma_unmap_single(pdev, entry->phyaddr, entry->size, dma_dir);
1015 +
1016 + kfree(entry->mem);
1017 +
1018 + kfree(entry);
1019 +}
1020 +
1021 +static int ring_init(struct tc_priv *priv, struct aca_ring *ring, u32 dnum, u32 dsize)
1022 +{
1023 + struct device *pdev = priv->ep_dev[0].dev;
1024 int size;
1025 - struct device *pdev;
1026
1027 memset(ring, 0, sizeof(*ring));
1028 ring->dnum = dnum;
1029 + ring->dsize = dsize;
1030 +
1031 + if (ring->dnum == 0) {
1032 + return 0;
1033 + }
1034 +
1035 size = dsize * dnum;
1036 - pdev = priv->ep_dev[0].dev;
1037
1038 - ring->dbase_mem = kmalloc(size, GFP_KERNEL);
1039 + ring->dbase_mem = dma_alloc_coherent(pdev, size, &(ring->dbase_phymem), GFP_KERNEL);
1040 if (!ring->dbase_mem) {
1041 - tc_err(priv, MSG_INIT, "Allocate SoC Ring fail: %d\n", dnum);
1042 - return;
1043 + tc_err(priv, MSG_INIT, "Ring allocation failed: %d\n", dnum);
1044 + return -1;
1045 }
1046
1047 - ring_mmap(ring->dbase_mem, size, DMA_FROM_DEVICE, &(ring->dbase_phymem));
1048 - tc_dbg(priv, MSG_INIT, "ring: membase: 0x%x, phybase: 0x%x, dnum: %d\n",
1049 - (u32)ring->dbase_mem, ring->dbase_phymem, ring->dnum);
1050 -
1051 - size = sizeof(u32);
1052 - ring->cnt_addr = kzalloc(size, GFP_KERNEL);
1053 - if (!ring->cnt_addr) {
1054 - tc_err(priv, MSG_INIT, "Allocate cumulative counter fail!\n");
1055 - return;
1056 - }
1057 + return 0;
1058 +}
1059
1060 - ring_mmap(ring->cnt_addr, size, DMA_TO_DEVICE, &(ring->cnt_phyaddr));
1061 - tc_dbg(priv, MSG_INIT, "ring: cumulative cnt addr: 0x%x, phy address: 0x%x\n",
1062 - (u32)ring->cnt_addr, ring->cnt_phyaddr);
1063 +#define ring_dnum(tcpriv, name1, name2) ((!tcpriv->param.name1##_dnum) ? name2##_DNUM : tcpriv->param.name1##_dnum)
1064
1065 - return;
1066 -}
1067 +static void ring_free(struct tc_priv *priv, struct aca_ring *ring)
1068 +{
1069 + struct device *pdev = priv->ep_dev[0].dev;
1070
1071 -#define ring_init(tcpriv, ring, name1, name2, num, size) \
1072 -{ \
1073 - if (!tcpriv->param.name1##_dnum) \
1074 - num = name2##_DNUM; \
1075 - else \
1076 - num = tcpriv->param.name1##_dnum; \
1077 - aca_soc_ring_init(tcpriv, ring, num, size); \
1078 + if (ring->dnum == 0) {
1079 + return;
1080 + }
1081 +
1082 + dma_free_coherent(pdev, ring->dsize * ring->dnum, ring->dbase_mem, ring->dbase_phymem);
1083 }
1084
1085 static irqreturn_t aca_rx_irq_handler(int irq, void *dev_id)
1086 @@ -777,39 +805,55 @@ static irqreturn_t aca_tx_irq_handler(in
1087 return IRQ_HANDLED;
1088 }
1089
1090 -static void irq_init(struct tc_priv *priv, const char *dev_name)
1091 +static void plat_irq_init(struct tc_priv *priv, const char *dev_name)
1092 {
1093 int ret;
1094 int i;
1095 - char name[IFNAMSIZ];
1096 + //char name[IFNAMSIZ];
1097
1098 for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
1099 - sprintf(name, "%s%d", dev_name, i);
1100 + //snprintf(name, sizeof(name), "aca-rxo%d", i);
1101
1102 ret = devm_request_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_rx_irq,
1103 - aca_rx_irq_handler, 0, name, &priv->ep_dev[i]);
1104 + aca_rx_irq_handler, 0, "aca-rxo", &priv->ep_dev[i]);
1105
1106 if (ret) {
1107 tc_err(priv, MSG_INIT,
1108 "ACA RX IRQ request Fail!: irq: %d, ep_id: %d\n",
1109 priv->ep_dev[i].aca_rx_irq, i);
1110 //return;
1111 - }
1112 + }
1113 +
1114 + //snprintf(name, sizeof(name), "aca-txo%d", i);
1115
1116 ret = devm_request_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_tx_irq,
1117 - aca_tx_irq_handler, 0, name, &priv->ep_dev[i]);
1118 + aca_tx_irq_handler, 0, "aca-txo", &priv->ep_dev[i]);
1119
1120 if (ret) {
1121 tc_err(priv, MSG_INIT,
1122 "ACA TX IRQ request Fail!: irq: %d, ep_id: %d\n",
1123 priv->ep_dev[i].aca_tx_irq, i);
1124 //return;
1125 - }
1126 + }
1127 }
1128
1129 return;
1130 }
1131
1132 +static void plat_irq_free(struct tc_priv *priv)
1133 +{
1134 + int i;
1135 +
1136 + for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
1137 +
1138 + /* Unregister RX irq handler */
1139 + devm_free_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_rx_irq, &priv->ep_dev[i]);
1140 +
1141 + /* Unregister TX irq handler */
1142 + devm_free_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_tx_irq, &priv->ep_dev[i]);
1143 + }
1144 +}
1145 +
1146 /**
1147 * Decide txin/rxout queue size
1148 * Create a tx/rx queue
1149 @@ -819,29 +863,68 @@ static int plat_dp_init(struct plat_priv
1150 struct tc_priv *tcpriv;
1151 struct aca_ring_grp *soc_rings;
1152 struct aca_ring *ring;
1153 - int size;
1154 u32 dnum;
1155 + int i;
1156 + int ret = 0;
1157
1158 tcpriv = priv->tc_priv;
1159
1160 - size = sizeof(struct dma_desc);
1161 + plat_mem_init();
1162 +
1163 soc_rings = &priv->soc_rings;
1164
1165 /* txin ring */
1166 ring = &soc_rings->txin;
1167 - ring_init(tcpriv, ring, txin, TXIN, dnum, size);
1168 + dnum = ring_dnum(tcpriv, txin, TXIN);
1169 + ret = txlist_init(&soc_rings->txlist, dnum);
1170 + if (ret < 0)
1171 + goto err5;
1172 + ret = ring_init(tcpriv, ring, dnum, sizeof(struct dma_tx_desc));
1173 + if (ret < 0)
1174 + goto err4;
1175
1176 /* txout ring */
1177 ring = &soc_rings->txout;
1178 - ring_init(tcpriv, ring, txout, TXOUT, dnum, size);
1179 + dnum = ring_dnum(tcpriv, txout, TXOUT);
1180 + ret = ring_init(tcpriv, ring, dnum, sizeof(u32));
1181 + if (ret < 0)
1182 + goto err3;
1183 +
1184 /* rxin ring */
1185 ring = &soc_rings->rxin;
1186 - ring_init(tcpriv, ring, rxin, RXIN, dnum, size);
1187 + dnum = ring_dnum(tcpriv, rxin, RXIN);
1188 + ret |= ring_init(tcpriv, ring, dnum, sizeof(struct dma_rx_desc));
1189 + if (ret < 0)
1190 + goto err2;
1191 +
1192 /* rxout ring */
1193 ring = &soc_rings->rxout;
1194 - ring_init(tcpriv, ring, rxout, RXOUT, dnum, size);
1195 + dnum = ring_dnum(tcpriv, rxout, RXOUT);
1196 + ret = ring_init(tcpriv, ring, dnum, sizeof(struct dma_rx_desc));
1197 + if (ret < 0)
1198 + goto err1;
1199 +
1200 + for (i = 0; i < EP_MAX_NUM && i < tcpriv->ep_num; i++) {
1201 +
1202 + /* Enable RX interrupt */
1203 + tcpriv->ep_dev[i].hw_ops->icu_en(&tcpriv->ep_dev[i], ACA_HOSTIF_RX);
1204 +
1205 + /* Enable TX interrupt */
1206 + tcpriv->ep_dev[i].hw_ops->icu_en(&tcpriv->ep_dev[i], ACA_HOSTIF_TX);
1207 + }
1208
1209 return 0;
1210 +
1211 +err1:
1212 + ring_free(tcpriv, &soc_rings->rxin);
1213 +err2:
1214 + ring_free(tcpriv, &soc_rings->txout);
1215 +err3:
1216 + ring_free(tcpriv, &soc_rings->txin);
1217 +err4:
1218 + txlist_free(&soc_rings->txlist);
1219 +err5:
1220 + return ret;
1221 }
1222
1223 /**
1224 @@ -850,6 +933,26 @@ static int plat_dp_init(struct plat_priv
1225 */
1226 static void plat_dp_exit(struct plat_priv *priv)
1227 {
1228 + struct tc_priv *tcpriv = priv->tc_priv;
1229 + struct aca_ring_grp *soc_rings = &priv->soc_rings;
1230 + int i;
1231 +
1232 + for (i = 0; i < EP_MAX_NUM && i < tcpriv->ep_num; i++) {
1233 +
1234 + /* Disable RX interrupt */
1235 + tcpriv->ep_dev[i].hw_ops->icu_mask(&tcpriv->ep_dev[i], ACA_HOSTIF_RX);
1236 +
1237 + /* Disable TX interrupt */
1238 + tcpriv->ep_dev[i].hw_ops->icu_mask(&tcpriv->ep_dev[i], ACA_HOSTIF_TX);
1239 + }
1240 +
1241 + ring_free(tcpriv, &soc_rings->txin);
1242 + ring_free(tcpriv, &soc_rings->txout);
1243 + ring_free(tcpriv, &soc_rings->rxin);
1244 + ring_free(tcpriv, &soc_rings->rxout);
1245 +
1246 + txlist_free(&soc_rings->txlist);
1247 +
1248 return;
1249 }
1250
1251 @@ -858,45 +961,45 @@ static int plat_soc_cfg_get(struct soc_c
1252 struct plat_priv *priv = g_plat_priv;
1253
1254 /* TXIN */
1255 - cfg->txin_dbase = priv->soc_rings.txin.dbase_phymem;
1256 - cfg->txin_dnum = priv->soc_rings.txin.dnum;
1257 - cfg->txin_desc_dwsz = DW_SZ(struct dma_desc);
1258 - cfg->txin_cnt_phyaddr = priv->soc_rings.txin.cnt_phyaddr;
1259 + cfg->txin.soc_phydbase = priv->soc_rings.txin.dbase_phymem;
1260 + cfg->txin.soc_dnum = priv->soc_rings.txin.dnum;
1261 + cfg->txin.soc_desc_dwsz = DW_SZ(struct dma_tx_desc);
1262 /* TXOUT */
1263 - cfg->txout_dbase = priv->soc_rings.txout.dbase_phymem;
1264 - cfg->txout_dnum = priv->soc_rings.txout.dnum;
1265 - cfg->txout_desc_dwsz = DW_SZ(struct dma_desc);
1266 - cfg->txout_cnt_phyaddr = priv->soc_rings.txout.cnt_phyaddr;
1267 + cfg->txout.soc_phydbase = priv->soc_rings.txout.dbase_phymem;
1268 + cfg->txout.soc_dnum = priv->soc_rings.txout.dnum;
1269 + cfg->txout.soc_desc_dwsz = DW_SZ(u32);
1270 /* RXOUT */
1271 - cfg->rxout_dbase = priv->soc_rings.rxout.dbase_phymem;
1272 - cfg->rxout_dnum = priv->soc_rings.rxout.dnum;
1273 - cfg->rxout_desc_dwsz = DW_SZ(struct dma_desc);
1274 - cfg->rxout_cnt_phyaddr = priv->soc_rings.rxout.cnt_phyaddr;
1275 + cfg->rxout.soc_phydbase = priv->soc_rings.rxout.dbase_phymem;
1276 + cfg->rxout.soc_dnum = priv->soc_rings.rxout.dnum;
1277 + cfg->rxout.soc_desc_dwsz = DW_SZ(struct dma_rx_desc);
1278 /* RXIN */
1279 - cfg->rxin_dbase = priv->soc_rings.rxin.dbase_phymem;
1280 - cfg->rxin_dnum = priv->soc_rings.rxin.dnum;
1281 - cfg->rxin_desc_dwsz = DW_SZ(struct dma_desc);
1282 - cfg->rxin_cnt_phyaddr = priv->soc_rings.rxin.cnt_phyaddr;
1283 + cfg->rxin.soc_phydbase = priv->soc_rings.rxin.dbase_phymem;
1284 + cfg->rxin.soc_dnum = priv->soc_rings.rxin.dnum;
1285 + cfg->rxin.soc_desc_dwsz = DW_SZ(struct dma_rx_desc);
1286
1287 tc_info(priv->tc_priv, MSG_INIT,
1288 "id: %d, txin(0x%x: %d, 0x%x), txout(0x%x: %d, 0x%x), rxin(0x%x: %d, 0x%x), rxout(0x%x: %d, 0x%x)\n",
1289 - id, cfg->txin_dbase, cfg->txin_dnum, cfg->txin_cnt_phyaddr,
1290 - cfg->txout_dbase, cfg->txout_dnum, cfg->txout_cnt_phyaddr,
1291 - cfg->rxin_dbase, cfg->rxout_dnum, cfg->rxin_cnt_phyaddr,
1292 - cfg->rxout_dbase, cfg->rxout_dnum, cfg->rxout_cnt_phyaddr);
1293 + id, cfg->txin.soc_phydbase, cfg->txin.soc_dnum, cfg->txin.soc_cnt_phyaddr,
1294 + cfg->txout.soc_phydbase, cfg->txout.soc_dnum, cfg->txout.soc_cnt_phyaddr,
1295 + cfg->rxin.soc_phydbase, cfg->rxin.soc_dnum, cfg->rxin.soc_cnt_phyaddr,
1296 + cfg->rxout.soc_phydbase, cfg->rxout.soc_dnum, cfg->rxout.soc_cnt_phyaddr);
1297
1298 return 0;
1299 }
1300
1301 -static int plat_open(struct net_device *pdev, char *dev_name,
1302 - int *subif, int flag)
1303 +static int plat_open(struct net_device *pdev, const char *dev_name,
1304 + int id, int flag)
1305 {
1306 + g_plat_priv->netdev = pdev;
1307 +
1308 return 0;
1309 }
1310
1311 -static void plat_close(struct net_device *pdev, char *dev_name,
1312 - int subif, int flag)
1313 +static void plat_close(struct net_device *pdev, const char *dev_name,
1314 + int flag)
1315 {
1316 + g_plat_priv->netdev = NULL;
1317 +
1318 return;
1319 }
1320
1321 @@ -971,7 +1074,6 @@ static void plat_disable_us(int en)
1322 static int plat_get_mib(struct net_device *pdev,
1323 struct rtnl_link_stats64 *stat)
1324 {
1325 - pr_info("%s is not supported\n", __func__);
1326 return -ENOTSUPP;
1327 }
1328
1329 @@ -1181,8 +1283,8 @@ int platform_init(struct tc_priv *tc_pri
1330 INIT_WORK(&priv->req_work.work, plat_tc_req_workqueue);
1331 tasklet_init(&txout_task, plat_txout_tasklet, 0);
1332 tasklet_init(&rxout_task, plat_rxout_tasklet, 0);
1333 - irq_init(tc_priv, drv_name);
1334 - //tasklet_init(&priv->coc.coc_task, plat_coc_tasklet, 0);
1335 + plat_irq_init(tc_priv, drv_name);
1336 +
1337 plat_tc_ops_setup(tc_priv);
1338 plat_dsl_ops_setup();
1339
1340 @@ -1201,8 +1303,15 @@ void platform_dsl_exit(void)
1341
1342 void platform_exit(void)
1343 {
1344 - //tasklet_kill(&g_plat_priv->coc.coc_task);
1345 + struct tc_priv *tcpriv = plat_to_tcpriv();
1346 +
1347 + tasklet_kill(&txout_task);
1348 + tasklet_kill(&rxout_task);
1349 +
1350 + plat_irq_free(tcpriv);
1351 +
1352 plat_dp_exit(g_plat_priv);
1353 +
1354 g_plat_priv = NULL;
1355 }
1356
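Note on the reworked ring handling in sw_plat.c: the cumulative-counter bookkeeping (cnt/cnt_addr) is removed. TXIN occupancy is now derived purely from the TXIN and TXOUT ring indices (the netdev queue is stopped while only one free slot remains), RXIN refill is reduced to crediting the UMT counter (RXIN_DNUM is 0, so no host-side RXIN ring is allocated), and processed TXOUT/RXOUT descriptors are acknowledged via the ACCUM_SUB register at umt_dst + 0x28 instead of ACCUM_ADD. A minimal sketch of the free-slot count implied by the checks in txin_action(), written as a hypothetical helper (assuming struct aca_ring as defined in this patch; the helper itself is not part of the patch):

    /* Free TXIN slots when consumption is tracked only through ring indices.
     * txin_action() stops the queue when this drops to 1 and rejects the
     * packet when it reaches 0.
     */
    static inline unsigned int txin_free_slots(const struct aca_ring *txin,
                                               const struct aca_ring *txout)
    {
            return (txout->idx + txin->dnum - txin->idx - 1) % txin->dnum;
    }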