ipq40xx: add v5.4 support
[openwrt/staging/rmilecki.git] target/linux/ipq40xx/files-5.4/drivers/net/ethernet/qualcomm/essedma/edma.c
1 /*
2 * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16 #include <linux/platform_device.h>
17 #include <linux/if_vlan.h>
18 #include "ess_edma.h"
19 #include "edma.h"
20
21 extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
22 bool edma_stp_rstp;
23 u16 edma_ath_eth_type;
24
25 /* edma_skb_priority_offset()
26 * Get the TX queue offset (0 or 1) derived from skb priority
27 */
28 static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
29 {
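/* Only bit 2 of skb->priority is used: it selects one of the two TPD queues that edma_tx_queue_get() maps onto each kernel TX queue. */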
30 return (skb->priority >> 2) & 1;
31 }
32
33 /* edma_alloc_tx_ring()
34 * Allocate Tx descriptors ring
35 */
36 static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
37 struct edma_tx_desc_ring *etdr)
38 {
39 struct platform_device *pdev = edma_cinfo->pdev;
40
41 /* Initialize ring */
42 etdr->size = sizeof(struct edma_sw_desc) * etdr->count;
43 etdr->sw_next_to_fill = 0;
44 etdr->sw_next_to_clean = 0;
45
46 /* Allocate SW descriptors */
47 etdr->sw_desc = vzalloc(etdr->size);
48 if (!etdr->sw_desc) {
49 dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
50 return -ENOMEM;
51 }
52
53 /* Allocate HW descriptors */
54 etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
55 GFP_KERNEL);
56 if (!etdr->hw_desc) {
57 dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
58 vfree(etdr->sw_desc);
59 return -ENOMEM;
60 }
61
62 return 0;
63 }
64
65 /* edma_free_tx_ring()
66 * Free tx rings allocated by edma_alloc_tx_rings
67 */
68 static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
69 struct edma_tx_desc_ring *etdr)
70 {
71 struct platform_device *pdev = edma_cinfo->pdev;
72
73 if (likely(etdr->dma))
74 dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
75 etdr->dma);
76
77 vfree(etdr->sw_desc);
78 etdr->sw_desc = NULL;
79 }
80
81 /* edma_alloc_rx_ring()
82 * allocate rx descriptor ring
83 */
84 static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
85 struct edma_rfd_desc_ring *erxd)
86 {
87 struct platform_device *pdev = edma_cinfo->pdev;
88
89 erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
90 erxd->sw_next_to_fill = 0;
91 erxd->sw_next_to_clean = 0;
92
93 /* Allocate SW descriptors */
94 erxd->sw_desc = vzalloc(erxd->size);
95 if (!erxd->sw_desc)
96 return -ENOMEM;
97
98 /* Alloc HW descriptors */
99 erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
100 GFP_KERNEL);
101 if (!erxd->hw_desc) {
102 vfree(erxd->sw_desc);
103 return -ENOMEM;
104 }
105
106 /* Initialize pending_fill */
107 erxd->pending_fill = 0;
108
109 return 0;
110 }
111
112 /* edma_free_rx_ring()
113 * Free rx ring allocated by alloc_rx_ring
114 */
115 static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
116 struct edma_rfd_desc_ring *rxdr)
117 {
118 struct platform_device *pdev = edma_cinfo->pdev;
119
120 if (likely(rxdr->dma))
121 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc,
122 rxdr->dma);
123
124 vfree(rxdr->sw_desc);
125 rxdr->sw_desc = NULL;
126 }
127
128 /* edma_configure_tx()
129 * Configure transmission control data
130 */
131 static void edma_configure_tx(struct edma_common_info *edma_cinfo)
132 {
133 u32 txq_ctrl_data;
134
135 txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
136 txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
137 txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
138 edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
139 }
140
141
142 /* edma_configure_rx()
143 * configure reception control data
144 */
145 static void edma_configure_rx(struct edma_common_info *edma_cinfo)
146 {
147 struct edma_hw *hw = &edma_cinfo->hw;
148 u32 rss_type, rx_desc1, rxq_ctrl_data;
149
150 /* Set RSS type */
151 rss_type = hw->rss_type;
152 edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
153
154 /* Set RFD burst number */
155 rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
156
157 /* Set RFD prefetch threshold */
158 rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
159
160 * Set RFD in host ring low threshold to generate interrupt
161 rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
162 edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
163
164 /* Set Rx FIFO threshold to start to DMA data to host */
165 rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
166
167 if (!edma_cinfo->is_single_phy) {
168 /* Set RX remove vlan bit */
169 rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
170 }
171
172 edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
173 }
174
175 /* edma_alloc_rx_buf()
176 * Allocate skbs (or pages) and refill the RX free descriptor ring
177 */
178 static int edma_alloc_rx_buf(struct edma_common_info
179 *edma_cinfo,
180 struct edma_rfd_desc_ring *erdr,
181 int cleaned_count, int queue_id)
182 {
183 struct platform_device *pdev = edma_cinfo->pdev;
184 struct edma_rx_free_desc *rx_desc;
185 struct edma_sw_desc *sw_desc;
186 struct sk_buff *skb;
187 unsigned int i;
188 u16 prod_idx, length;
189 u32 reg_data;
190
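/* Clamp the refill request so it never exceeds the ring size */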
191 if (cleaned_count > erdr->count)
192 cleaned_count = erdr->count - 1;
193
194 i = erdr->sw_next_to_fill;
195
196 while (cleaned_count) {
197 sw_desc = &erdr->sw_desc[i];
198 length = edma_cinfo->rx_head_buffer_len;
199
200 if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
201 skb = sw_desc->skb;
202
203 /* Clear REUSE Flag */
204 sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
205 } else {
206 /* alloc skb */
207 skb = netdev_alloc_skb_ip_align(edma_netdev[0], length);
208 if (!skb) {
209 /* Better luck next round */
210 break;
211 }
212 }
213
214 if (edma_cinfo->page_mode) {
215 struct page *pg = alloc_page(GFP_ATOMIC);
216
217 if (!pg) {
218 dev_kfree_skb_any(skb);
219 break;
220 }
221
222 sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
223 edma_cinfo->rx_page_buffer_len,
224 DMA_FROM_DEVICE);
225 if (dma_mapping_error(&pdev->dev,
226 sw_desc->dma)) {
227 __free_page(pg);
228 dev_kfree_skb_any(skb);
229 break;
230 }
231
232 skb_fill_page_desc(skb, 0, pg, 0,
233 edma_cinfo->rx_page_buffer_len);
234 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
235 sw_desc->length = edma_cinfo->rx_page_buffer_len;
236 } else {
237 sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
238 length, DMA_FROM_DEVICE);
239 if (dma_mapping_error(&pdev->dev,
240 sw_desc->dma)) {
241 dev_kfree_skb_any(skb);
242 break;
243 }
244
245 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
246 sw_desc->length = length;
247 }
248
249 /* Update the buffer info */
250 sw_desc->skb = skb;
251 rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]);
252 rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
253 if (++i == erdr->count)
254 i = 0;
255 cleaned_count--;
256 }
257
258 erdr->sw_next_to_fill = i;
259
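/* The producer index programmed into hardware points at the last descriptor actually filled, i.e. one position behind sw_next_to_fill. */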
260 if (i == 0)
261 prod_idx = erdr->count - 1;
262 else
263 prod_idx = i - 1;
264
265 /* Update the producer index */
266 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
267 reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
268 reg_data |= prod_idx;
269 edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
270
271 /* If we couldn't allocate all the buffers
272 * we increment the alloc failure counters
273 */
274 if (cleaned_count)
275 edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;
276
277 return cleaned_count;
278 }
279
280 /* edma_init_desc()
281 * update descriptor ring size, buffer and producer/consumer index
282 */
283 static void edma_init_desc(struct edma_common_info *edma_cinfo)
284 {
285 struct edma_rfd_desc_ring *rfd_ring;
286 struct edma_tx_desc_ring *etdr;
287 int i = 0, j = 0;
288 u32 data = 0;
289 u16 hw_cons_idx = 0;
290
291 /* Set the base address of every TPD ring. */
292 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
293 etdr = edma_cinfo->tpd_ring[i];
294
295 /* Update descriptor ring base address */
296 edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
297 edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
298
299 /* Calculate hardware consumer index */
300 hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
301 etdr->sw_next_to_fill = hw_cons_idx;
302 etdr->sw_next_to_clean = hw_cons_idx;
303 data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
304 data |= hw_cons_idx;
305
306 /* update producer index */
307 edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
308
309 /* update SW consumer index register */
310 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
311
312 /* Set TPD ring size */
313 edma_write_reg(EDMA_REG_TPD_RING_SIZE,
314 edma_cinfo->tx_ring_count &
315 EDMA_TPD_RING_SIZE_MASK);
316 }
317
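/* With 4 RX queues the RFD rings occupy every other index (j advances by 2); with 8 queues they are contiguous. */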
318 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
319 rfd_ring = edma_cinfo->rfd_ring[j];
320 /* Update Receive Free descriptor ring base address */
321 edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
322 (u32)(rfd_ring->dma));
323 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
324 }
325
326 data = edma_cinfo->rx_head_buffer_len;
327 if (edma_cinfo->page_mode)
328 data = edma_cinfo->rx_page_buffer_len;
329
330 data &= EDMA_RX_BUF_SIZE_MASK;
331 data <<= EDMA_RX_BUF_SIZE_SHIFT;
332
333 /* Update RFD ring size and RX buffer size */
334 data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
335 << EDMA_RFD_RING_SIZE_SHIFT;
336
337 edma_write_reg(EDMA_REG_RX_DESC0, data);
338
339 /* Disable TX FIFO low watermark and high watermark */
340 edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
341
342 * Load all of the base addresses programmed above
343 edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
344 data |= 1 << EDMA_LOAD_PTR_SHIFT;
345 edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
346 }
347
348 /* edma_receive_checksum()
349 * Check the hardware-reported checksum status of received packets
350 */
351 static void edma_receive_checksum(struct edma_rx_return_desc *rd,
352 struct sk_buff *skb)
353 {
354 skb_checksum_none_assert(skb);
355
356 /* Check the RRD IP/L4 checksum-fail bit; if it is
357 * set, the hardware detected a checksum failure and
358 * ip_summed is left as CHECKSUM_NONE.
359 */
360 if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
361 return;
362
363 skb->ip_summed = CHECKSUM_UNNECESSARY;
364 }
365
366 /* edma_clean_rfd()
367 * Clean up RX resources on error
368 */
369 static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
370 {
371 struct edma_rx_free_desc *rx_desc;
372 struct edma_sw_desc *sw_desc;
373
374 rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
375 sw_desc = &erdr->sw_desc[index];
376 if (sw_desc->skb) {
377 dev_kfree_skb_any(sw_desc->skb);
378 sw_desc->skb = NULL;
379 }
380
381 memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
382 }
383
384 /* edma_rx_complete_stp_rstp()
385 * Re-insert the Atheros header for STP/RSTP (BPDU) frames on receive
386 */
387 static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
388 {
389 int i;
390 u32 priority;
391 u16 port_type;
392 u8 mac_addr[EDMA_ETH_HDR_LEN];
393
394 port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
395 & EDMA_RRD_PORT_TYPE_MASK;
396 /* Proceed with the STP/RSTP handling only when the
397 * port type is the Atheros header RSTP port type (0x4)
398 */
399 if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
400 u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
401
402 /* calculate the frame priority */
403 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
404 & EDMA_RRD_PRIORITY_MASK;
405
406 for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
407 mac_addr[i] = skb->data[i];
408
409 /* Check if the destination MAC address is the BPDU address */
410 if (!memcmp(mac_addr, bpdu_mac, 6)) {
411 /* The destination is the BPDU MAC
412 * address, so prepend the Atheros
413 * header to the packet.
414 */
415 u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
416 (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
417 (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
418 skb_push(skb, 4);
419 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
420 *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
421 *(uint16_t *)&skb->data[14] = htons(athr_hdr);
422 }
423 }
424 }
425
426 /*
427 * edma_rx_complete_fraglist()
428 * Complete Rx processing for fraglist skbs
429 */
430 static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
431 u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
432 {
433 struct platform_device *pdev = edma_cinfo->pdev;
434 struct edma_hw *hw = &edma_cinfo->hw;
435 struct sk_buff *skb_temp;
436 struct edma_sw_desc *sw_desc;
437 int i;
438 u16 size_remaining;
439
440 skb->data_len = 0;
441 skb->tail += (hw->rx_head_buff_size - 16);
442 skb->len = skb->truesize = length;
443 size_remaining = length - (hw->rx_head_buff_size - 16);
444
445 /* clean-up all related sw_descs */
446 for (i = 1; i < num_rfds; i++) {
447 struct sk_buff *skb_prev;
448 sw_desc = &erdr->sw_desc[sw_next_to_clean];
449 skb_temp = sw_desc->skb;
450
451 dma_unmap_single(&pdev->dev, sw_desc->dma,
452 sw_desc->length, DMA_FROM_DEVICE);
453
454 if (size_remaining < hw->rx_head_buff_size)
455 skb_put(skb_temp, size_remaining);
456 else
457 skb_put(skb_temp, hw->rx_head_buff_size);
458
459 /*
460 * For the first chained RFD we attach its skb as the
461 * head of skb->frag_list; later buffers are linked
462 * via skb->next
463 */
464 if (i == 1)
465 skb_shinfo(skb)->frag_list = skb_temp;
466 else
467 skb_prev->next = skb_temp;
468 skb_prev = skb_temp;
469 skb_temp->next = NULL;
470
471 skb->data_len += skb_temp->len;
472 size_remaining -= skb_temp->len;
473
474 /* Increment SW index */
475 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
476 (*cleaned_count)++;
477 }
478
479 return sw_next_to_clean;
480 }
481
482 /* edma_rx_complete_paged()
483 * Complete Rx processing for paged skbs
484 */
485 static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
486 u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
487 {
488 struct platform_device *pdev = edma_cinfo->pdev;
489 struct sk_buff *skb_temp;
490 struct edma_sw_desc *sw_desc;
491 int i;
492 u16 size_remaining;
493
494 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
495
496 /* Setup skbuff fields */
497 skb->len = length;
498
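/* The first 16 bytes of the first page hold the RRD, so the page offsets/sizes below skip past it. */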
499 if (likely(num_rfds <= 1)) {
500 skb->data_len = length;
501 skb->truesize += edma_cinfo->rx_page_buffer_len;
502 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
503 16, length);
504 } else {
505 skb_frag_size_sub(frag, 16);
506 skb->data_len = skb_frag_size(frag);
507 skb->truesize += edma_cinfo->rx_page_buffer_len;
508 size_remaining = length - skb_frag_size(frag);
509
510 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
511 16, skb_frag_size(frag));
512
513 /* clean-up all related sw_descs */
514 for (i = 1; i < num_rfds; i++) {
515 sw_desc = &erdr->sw_desc[sw_next_to_clean];
516 skb_temp = sw_desc->skb;
517 frag = &skb_shinfo(skb_temp)->frags[0];
518 dma_unmap_page(&pdev->dev, sw_desc->dma,
519 sw_desc->length, DMA_FROM_DEVICE);
520
521 if (size_remaining < edma_cinfo->rx_page_buffer_len)
522 skb_frag_size_set(frag, size_remaining);
523
524 skb_fill_page_desc(skb, i, skb_frag_page(frag),
525 0, skb_frag_size(frag));
526
527 skb_shinfo(skb_temp)->nr_frags = 0;
528 dev_kfree_skb_any(skb_temp);
529
530 skb->data_len += skb_frag_size(frag);
531 skb->truesize += edma_cinfo->rx_page_buffer_len;
532 size_remaining -= skb_frag_size(frag);
533
534 /* Increment SW index */
535 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
536 (*cleaned_count)++;
537 }
538 }
539
540 return sw_next_to_clean;
541 }
542
543 /*
544 * edma_rx_complete()
545 * Main api called from the poll function to process rx packets.
546 */
547 static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
548 int *work_done, int work_to_do, int queue_id,
549 struct napi_struct *napi)
550 {
551 struct platform_device *pdev = edma_cinfo->pdev;
552 struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
553 struct net_device *netdev;
554 struct edma_adapter *adapter;
555 struct edma_sw_desc *sw_desc;
556 struct sk_buff *skb;
557 struct edma_rx_return_desc *rd;
558 u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
559 sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
560 u32 data = 0;
561 u8 *vaddr;
562 int port_id, i, drop_count = 0;
563 u32 priority;
564 u16 count = erdr->count, rfd_avail;
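/* Collapse the 8 hardware RX queues into the 4 RX queue ids reported to the stack (two hardware queues per id). */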
565 u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
566
567 cleaned_count = erdr->pending_fill;
568 sw_next_to_clean = erdr->sw_next_to_clean;
569
570 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
571 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
572 EDMA_RFD_CONS_IDX_MASK;
573
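/* Process completed RFDs until the software index catches up with the hardware consumer index or the NAPI budget runs out. */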
574 do {
575 while (sw_next_to_clean != hw_next_to_clean) {
576 if (!work_to_do)
577 break;
578
579 sw_desc = &erdr->sw_desc[sw_next_to_clean];
580 skb = sw_desc->skb;
581
582 /* Unmap the allocated buffer */
583 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
584 dma_unmap_single(&pdev->dev, sw_desc->dma,
585 sw_desc->length, DMA_FROM_DEVICE);
586 else
587 dma_unmap_page(&pdev->dev, sw_desc->dma,
588 sw_desc->length, DMA_FROM_DEVICE);
589
590 /* Get RRD */
591 if (edma_cinfo->page_mode) {
592 vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
593 memcpy((uint8_t *)&rrd[0], vaddr, 16);
594 rd = (struct edma_rx_return_desc *)rrd;
595 kunmap_atomic(vaddr);
596 } else {
597 rd = (struct edma_rx_return_desc *)skb->data;
598 }
599
600 /* Check if RRD is valid */
601 if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
602 edma_clean_rfd(erdr, sw_next_to_clean);
603 sw_next_to_clean = (sw_next_to_clean + 1) &
604 (erdr->count - 1);
605 cleaned_count++;
606 continue;
607 }
608
609 /* Get the number of RFDs from RRD */
610 num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
611
612 /* Get Rx port ID from switch */
613 port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
614 if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
615 dev_err(&pdev->dev, "Invalid RRD source port bit set");
616 for (i = 0; i < num_rfds; i++) {
617 edma_clean_rfd(erdr, sw_next_to_clean);
618 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
619 cleaned_count++;
620 }
621 continue;
622 }
623
624 /* check if we have a sink for the data we receive.
625 * If the interface isn't setup, we have to drop the
626 * incoming data for now.
627 */
628 netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
629 if (!netdev) {
630 edma_clean_rfd(erdr, sw_next_to_clean);
631 sw_next_to_clean = (sw_next_to_clean + 1) &
632 (erdr->count - 1);
633 cleaned_count++;
634 continue;
635 }
636 adapter = netdev_priv(netdev);
637
638 /* This code is added to handle a usecase where a high
639 * priority stream and a low priority stream are
640 * received simultaneously on the DUT. The problem occurs
641 * if one of the Rx rings is full and the corresponding
642 * core is busy with other work. This causes the ESS CPU
643 * port to backpressure all incoming traffic, including
644 * the high priority one. We monitor the free descriptor
645 * count on each CPU and whenever it reaches the threshold
646 * (< 80), we drop all low priority traffic and let only
647 * high priority traffic pass through. We can hence avoid
648 * the ESS CPU port sending backpressure on the high
649 * priority stream.
650 */
651 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
652 & EDMA_RRD_PRIORITY_MASK;
653 if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
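/* rfd_avail is the number of free RFDs left to the hardware on this ring */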
654 rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
655 if (rfd_avail < EDMA_RFD_AVAIL_THR) {
656 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
657 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
658 adapter->stats.rx_dropped++;
659 cleaned_count++;
660 drop_count++;
661 if (drop_count == 3) {
662 work_to_do--;
663 (*work_done)++;
664 drop_count = 0;
665 }
666 if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
667 /* If buffer clean count reaches 16, we replenish HW buffers. */
668 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
669 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
670 sw_next_to_clean);
671 cleaned_count = ret_count;
672 erdr->pending_fill = ret_count;
673 }
674 continue;
675 }
676 }
677
678 work_to_do--;
679 (*work_done)++;
680
681 /* Increment SW index */
682 sw_next_to_clean = (sw_next_to_clean + 1) &
683 (erdr->count - 1);
684
685 cleaned_count++;
686
687 /* Get the packet size and allocate buffer */
688 length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
689
690 if (edma_cinfo->page_mode) {
691 /* paged skb */
692 sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
693 if (!pskb_may_pull(skb, ETH_HLEN)) {
694 dev_kfree_skb_any(skb);
695 continue;
696 }
697 } else {
698 /* single or fraglist skb */
699
700 /* The first 16 bytes of the buffer hold the RRD
701 * (Rx return descriptor), so the actual packet data
702 * starts at an offset of 16.
703 */
704 skb_reserve(skb, 16);
705 if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) {
706 skb_put(skb, length);
707 } else {
708 sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
709 }
710 }
711
712 if (edma_stp_rstp) {
713 edma_rx_complete_stp_rstp(skb, port_id, rd);
714 }
715
716 skb->protocol = eth_type_trans(skb, netdev);
717
718 /* Record Rx queue for RFS/RPS and fill flow hash from HW */
719 skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
720 if (netdev->features & NETIF_F_RXHASH) {
721 hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
722 if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
723 skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
724 }
725
726 #ifdef CONFIG_NF_FLOW_COOKIE
727 skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
728 #endif
729 edma_receive_checksum(rd, skb);
730
731 /* Process VLAN HW acceleration indication provided by HW */
732 if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
733 vlan = rd->rrd4;
734 if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
735 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
736 else if (rd->rrd1 & EDMA_RRD_SVLAN)
737 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
738 }
739
740 /* Update rx statistics */
741 adapter->stats.rx_packets++;
742 adapter->stats.rx_bytes += length;
743
744 /* Check if we reached refill threshold */
745 if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
746 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
747 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
748 sw_next_to_clean);
749 cleaned_count = ret_count;
750 erdr->pending_fill = ret_count;
751 }
752
753 /* At this point skb should go to stack */
754 napi_gro_receive(napi, skb);
755 }
756
757 /* Check if we still have NAPI budget */
758 if (!work_to_do)
759 break;
760
761 /* Read index once again since we still have NAPI budget */
762 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
763 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
764 EDMA_RFD_CONS_IDX_MASK;
765 } while (hw_next_to_clean != sw_next_to_clean);
766
767 erdr->sw_next_to_clean = sw_next_to_clean;
768
769 /* Refill here in case refill threshold wasn't reached */
770 if (likely(cleaned_count)) {
771 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
772 erdr->pending_fill = ret_count;
773 if (ret_count) {
774 if (net_ratelimit())
775 dev_dbg(&pdev->dev, "Not all buffers were reallocated");
776 }
777
778 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
779 erdr->sw_next_to_clean);
780 }
781
782 return erdr->pending_fill;
783 }
784
785 /* edma_delete_rfs_filter()
786 * Remove RFS filter from switch
787 */
788 static int edma_delete_rfs_filter(struct edma_adapter *adapter,
789 struct edma_rfs_filter_node *filter_node)
790 {
791 int res = -1;
792
793 struct flow_keys *keys = &filter_node->keys;
794
795 if (likely(adapter->set_rfs_rule))
796 res = (*adapter->set_rfs_rule)(adapter->netdev,
797 flow_get_u32_src(keys), flow_get_u32_dst(keys),
798 keys->ports.src, keys->ports.dst,
799 keys->basic.ip_proto, filter_node->rq_id, 0);
800
801 return res;
802 }
803
804 /* edma_add_rfs_filter()
805 * Add RFS filter to switch
806 */
807 static int edma_add_rfs_filter(struct edma_adapter *adapter,
808 struct flow_keys *keys, u16 rq,
809 struct edma_rfs_filter_node *filter_node)
810 {
811 int res = -1;
812
813 struct flow_keys *dest_keys = &filter_node->keys;
814
815 memcpy(dest_keys, keys, sizeof(*dest_keys));
816 /*
817 dest_keys->control = keys->control;
818 dest_keys->basic = keys->basic;
819 dest_keys->addrs = keys->addrs;
820 dest_keys->ports = keys->ports;
821 dest_keys.ip_proto = keys->ip_proto;
822 */
823 /* Call callback registered by ESS driver */
824 if (likely(adapter->set_rfs_rule))
825 res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
826 flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst,
827 keys->basic.ip_proto, rq, 1);
828
829 return res;
830 }
831
832 /* edma_rfs_key_search()
833 * Look for existing RFS entry
834 */
835 static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
836 struct flow_keys *key)
837 {
838 struct edma_rfs_filter_node *p;
839
840 hlist_for_each_entry(p, h, node)
841 if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) &&
842 flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) &&
843 p->keys.ports.src == key->ports.src &&
844 p->keys.ports.dst == key->ports.dst &&
845 p->keys.basic.ip_proto == key->basic.ip_proto)
846 return p;
847 return NULL;
848 }
849
850 /* edma_initialise_rfs_flow_table()
851 * Initialise EDMA RFS flow table
852 */
853 static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
854 {
855 int i;
856
857 spin_lock_init(&adapter->rfs.rfs_ftab_lock);
858
859 /* Initialize EDMA flow hash table */
860 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
861 INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
862
863 adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
864 adapter->rfs.filter_available = adapter->rfs.max_num_filter;
865 adapter->rfs.hashtoclean = 0;
866
867 /* Add timer to get periodic RFS updates from OS */
868 timer_setup(&adapter->rfs.expire_rfs, edma_flow_may_expire, 0);
869 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
870 }
871
872 /* edma_free_rfs_flow_table()
873 * Free EDMA RFS flow table
874 */
875 static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
876 {
877 int i;
878
879 /* Remove sync timer */
880 del_timer_sync(&adapter->rfs.expire_rfs);
881 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
882
883 /* Free EDMA RFS table entries */
884 adapter->rfs.filter_available = 0;
885
886 /* Clean-up EDMA flow hash table */
887 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
888 struct hlist_head *hhead;
889 struct hlist_node *tmp;
890 struct edma_rfs_filter_node *filter_node;
891 int res;
892
893 hhead = &adapter->rfs.hlist_head[i];
894 hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
895 res = edma_delete_rfs_filter(adapter, filter_node);
896 if (res < 0)
897 dev_warn(&adapter->netdev->dev,
898 "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
899 filter_node->flow_id);
900 hlist_del(&filter_node->node);
901 kfree(filter_node);
902 }
903 }
904 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
905 }
906
907 /* edma_tx_unmap_and_free()
908 * clean TX buffer
909 */
910 static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
911 struct edma_sw_desc *sw_desc)
912 {
913 struct sk_buff *skb = sw_desc->skb;
914
915 if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
916 (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
917 /* unmap_single for skb head area */
918 dma_unmap_single(&pdev->dev, sw_desc->dma,
919 sw_desc->length, DMA_TO_DEVICE);
920 else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
921 /* unmap page for paged fragments */
922 dma_unmap_page(&pdev->dev, sw_desc->dma,
923 sw_desc->length, DMA_TO_DEVICE);
924
925 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
926 dev_kfree_skb_any(skb);
927
928 sw_desc->flags = 0;
929 }
930
931 /* edma_tx_complete()
932 * Used to clean tx queues and update hardware and consumer index
933 */
934 static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
935 {
936 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
937 struct edma_sw_desc *sw_desc;
938 struct platform_device *pdev = edma_cinfo->pdev;
939 int i;
940
941 u16 sw_next_to_clean = etdr->sw_next_to_clean;
942 u16 hw_next_to_clean;
943 u32 data = 0;
944
945 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
946 hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
947
948 /* clean the buffer here */
949 while (sw_next_to_clean != hw_next_to_clean) {
950 sw_desc = &etdr->sw_desc[sw_next_to_clean];
951 edma_tx_unmap_and_free(pdev, sw_desc);
952 sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
953 }
954
955 etdr->sw_next_to_clean = sw_next_to_clean;
956
957 /* update the TPD consumer index register */
958 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
959
960 /* Wake the queue if queue is stopped and netdev link is up */
961 for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) {
962 if (netif_tx_queue_stopped(etdr->nq[i])) {
963 if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
964 netif_tx_wake_queue(etdr->nq[i]);
965 }
966 }
967 }
968
969 /* edma_get_tx_buffer()
970 * Get sw_desc corresponding to the TPD
971 */
972 static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
973 struct edma_tx_desc *tpd, int queue_id)
974 {
975 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
976 return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
977 }
978
979 /* edma_get_next_tpd()
980 * Return a TPD descriptor for transfer
981 */
982 static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
983 int queue_id)
984 {
985 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
986 u16 sw_next_to_fill = etdr->sw_next_to_fill;
987 struct edma_tx_desc *tpd_desc =
988 (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
989
990 etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
991
992 return tpd_desc;
993 }
994
995 /* edma_tpd_available()
996 * Check number of free TPDs
997 */
998 static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
999 int queue_id)
1000 {
1001 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1002
1003 u16 sw_next_to_fill;
1004 u16 sw_next_to_clean;
1005 u16 count = 0;
1006
1007 sw_next_to_clean = etdr->sw_next_to_clean;
1008 sw_next_to_fill = etdr->sw_next_to_fill;
1009
1010 if (likely(sw_next_to_clean <= sw_next_to_fill))
1011 count = etdr->count;
1012
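/* Free TPDs are the slots between the fill and clean indices; one descriptor is always kept unused, hence the trailing -1. */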
1013 return count + sw_next_to_clean - sw_next_to_fill - 1;
1014 }
1015
1016 /* edma_tx_queue_get()
1017 * Get the hardware TX queue id for this skb
1018 */
1019 static inline int edma_tx_queue_get(struct edma_adapter *adapter,
1020 struct sk_buff *skb, int txq_id)
1021 {
1022 /* skb->priority is used as an index into the skb priority table
1023 * and, based on the packet priority, the corresponding queue is assigned.
1024 */
1025 return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
1026 }
1027
1028 /* edma_tx_update_hw_idx()
1029 * update the producer index for the ring transmitted
1030 */
1031 static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
1032 struct sk_buff *skb, int queue_id)
1033 {
1034 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1035 u32 tpd_idx_data;
1036
1037 /* Read and update the producer index */
1038 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
1039 tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
1040 tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
1041 << EDMA_TPD_PROD_IDX_SHIFT;
1042
1043 edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
1044 }
1045
1046 /* edma_rollback_tx()
1047 * Reclaim TX descriptors and buffers in case of error
1048 */
1049 static void edma_rollback_tx(struct edma_adapter *adapter,
1050 struct edma_tx_desc *start_tpd, int queue_id)
1051 {
1052 struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
1053 struct edma_sw_desc *sw_desc;
1054 struct edma_tx_desc *tpd = NULL;
1055 u16 start_index, index;
1056
1057 start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
1058
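/* Walk from the first TPD taken for this skb up to the current fill position, unmapping and clearing each descriptor, then rewind the fill index. */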
1059 index = start_index;
1060 while (index != etdr->sw_next_to_fill) {
1061 tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
1062 sw_desc = &etdr->sw_desc[index];
1063 edma_tx_unmap_and_free(adapter->pdev, sw_desc);
1064 memset(tpd, 0, sizeof(struct edma_tx_desc));
1065 if (++index == etdr->count)
1066 index = 0;
1067 }
1068 etdr->sw_next_to_fill = start_index;
1069 }
1070
1071 /* edma_tx_map_and_fill()
1072 * gets called from edma_xmit_frame
1073 *
1074 * This is where the dma of the buffer to be transmitted
1075 * gets mapped
1076 */
1077 static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
1078 struct edma_adapter *adapter, struct sk_buff *skb, int queue_id,
1079 unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap,
1080 bool packet_is_rstp, int nr_frags)
1081 {
1082 struct edma_sw_desc *sw_desc = NULL;
1083 struct platform_device *pdev = edma_cinfo->pdev;
1084 struct edma_tx_desc *tpd = NULL, *start_tpd = NULL;
1085 struct sk_buff *iter_skb;
1086 int i = 0;
1087 u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
1088 u16 buf_len, lso_desc_len = 0;
1089
1090 /* It should either be a nr_frags skb or fraglist skb but not both */
1091 BUG_ON(nr_frags && skb_has_frag_list(skb));
1092
1093 if (skb_is_gso(skb)) {
1094 /* TODO: What additional checks need to be performed here */
1095 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
1096 lso_word1 |= EDMA_TPD_IPV4_EN;
1097 ip_hdr(skb)->check = 0;
1098 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1099 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1100 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
1101 lso_word1 |= EDMA_TPD_LSO_V2_EN;
1102 ipv6_hdr(skb)->payload_len = 0;
1103 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1104 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1105 } else
1106 return -EINVAL;
1107
1108 lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
1109 (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
1110 } else if (flags_transmit & EDMA_HW_CHECKSUM) {
1111 u8 css, cso;
1112 cso = skb_checksum_start_offset(skb);
1113 css = cso + skb->csum_offset;
1114
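/* The checksum start and store offsets appear to be programmed in units of 16-bit words, hence the shifts by one below. */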
1115 word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
1116 word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
1117 word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
1118 }
1119
1120 if (skb->protocol == htons(ETH_P_PPP_SES))
1121 word1 |= EDMA_TPD_PPPOE_EN;
1122
1123 if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
1124 switch(skb->vlan_proto) {
1125 case htons(ETH_P_8021Q):
1126 word3 |= (1 << EDMA_TX_INS_CVLAN);
1127 word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1128 break;
1129 case htons(ETH_P_8021AD):
1130 word1 |= (1 << EDMA_TX_INS_SVLAN);
1131 svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1132 break;
1133 default:
1134 dev_err(&pdev->dev, "no ctag or stag present\n");
1135 goto vlan_tag_error;
1136 }
1137 } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
1138 word3 |= (1 << EDMA_TX_INS_CVLAN);
1139 word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
1140 }
1141
1142 if (packet_is_rstp) {
1143 word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1144 word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
1145 } else {
1146 word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1147 }
1148
1149 buf_len = skb_headlen(skb);
1150
1151 if (lso_word1) {
1152 if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
1153
1154 /* IPv6 LSOv2 descriptor */
1155 start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1156 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1157 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
1158
1159 /* LSOv2 descriptor overrides addr field to pass length */
1160 tpd->addr = cpu_to_le16(skb->len);
1161 tpd->svlan_tag = svlan_tag;
1162 tpd->word1 = word1 | lso_word1;
1163 tpd->word3 = word3;
1164 }
1165
1166 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1167 if (!start_tpd)
1168 start_tpd = tpd;
1169 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1170
1171 /* The last buffer info contains the skb address,
1172 * so the skb will be freed after unmap
1173 */
1174 sw_desc->length = lso_desc_len;
1175 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1176
1177 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1178 skb->data, buf_len, DMA_TO_DEVICE);
1179 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1180 goto dma_error;
1181
1182 tpd->addr = cpu_to_le32(sw_desc->dma);
1183 tpd->len = cpu_to_le16(buf_len);
1184
1185 tpd->svlan_tag = svlan_tag;
1186 tpd->word1 = word1 | lso_word1;
1187 tpd->word3 = word3;
1188
1189 /* The last buffer info contains the skb address,
1190 * so it will be freed after unmap
1191 */
1192 sw_desc->length = lso_desc_len;
1193 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1194
1195 buf_len = 0;
1196 }
1197
1198 if (likely(buf_len)) {
1199
1200 /* TODO Do not dequeue descriptor if there is a potential error */
1201 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1202
1203 if (!start_tpd)
1204 start_tpd = tpd;
1205
1206 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1207
1208 /* The last buffer info contains the skb address,
1209 * so it will be freed after unmap
1210 */
1211 sw_desc->length = buf_len;
1212 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1213 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1214 skb->data, buf_len, DMA_TO_DEVICE);
1215 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1216 goto dma_error;
1217
1218 tpd->addr = cpu_to_le32(sw_desc->dma);
1219 tpd->len = cpu_to_le16(buf_len);
1220
1221 tpd->svlan_tag = svlan_tag;
1222 tpd->word1 = word1 | lso_word1;
1223 tpd->word3 = word3;
1224 }
1225
1226 /* Walk through all paged fragments */
1227 while (nr_frags--) {
1228 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1229 buf_len = skb_frag_size(frag);
1230 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1231 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1232 sw_desc->length = buf_len;
1233 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1234
1235 sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
1236
1237 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1238 goto dma_error;
1239
1240 tpd->addr = cpu_to_le32(sw_desc->dma);
1241 tpd->len = cpu_to_le16(buf_len);
1242
1243 tpd->svlan_tag = svlan_tag;
1244 tpd->word1 = word1 | lso_word1;
1245 tpd->word3 = word3;
1246 i++;
1247 }
1248
1249 /* Walk through all fraglist skbs */
1250 skb_walk_frags(skb, iter_skb) {
1251 buf_len = iter_skb->len;
1252 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1253 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1254 sw_desc->length = buf_len;
1255 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1256 iter_skb->data, buf_len, DMA_TO_DEVICE);
1257
1258 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1259 goto dma_error;
1260
1261 tpd->addr = cpu_to_le32(sw_desc->dma);
1262 tpd->len = cpu_to_le16(buf_len);
1263 tpd->svlan_tag = svlan_tag;
1264 tpd->word1 = word1 | lso_word1;
1265 tpd->word3 = word3;
1266 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
1267 }
1268
1269 if (tpd)
1270 tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
1271
1272 sw_desc->skb = skb;
1273 sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
1274
1275 return 0;
1276
1277 dma_error:
1278 edma_rollback_tx(adapter, start_tpd, queue_id);
1279 dev_err(&pdev->dev, "TX DMA map failed\n");
1280 vlan_tag_error:
1281 return -ENOMEM;
1282 }
1283
1284 /* edma_check_link()
1285 * check Link status
1286 */
1287 static int edma_check_link(struct edma_adapter *adapter)
1288 {
1289 struct phy_device *phydev = adapter->phydev;
1290
1291 if (!(adapter->poll_required))
1292 return __EDMA_LINKUP;
1293
1294 if (phydev->link)
1295 return __EDMA_LINKUP;
1296
1297 return __EDMA_LINKDOWN;
1298 }
1299
1300 /* edma_adjust_link()
1301 * check for edma link status
1302 */
1303 void edma_adjust_link(struct net_device *netdev)
1304 {
1305 int status;
1306 struct edma_adapter *adapter = netdev_priv(netdev);
1307 struct phy_device *phydev = adapter->phydev;
1308
1309 if (!test_bit(__EDMA_UP, &adapter->state_flags))
1310 return;
1311
1312 status = edma_check_link(adapter);
1313
1314 if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
1315 dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
1316 adapter->link_state = __EDMA_LINKUP;
1317 if (adapter->edma_cinfo->is_single_phy) {
1318 ess_set_port_status_speed(adapter->edma_cinfo, phydev,
1319 ffs(adapter->dp_bitmap) - 1);
1320 }
1321 netif_carrier_on(netdev);
1322 if (netif_running(netdev))
1323 netif_tx_wake_all_queues(netdev);
1324 } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
1325 dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
1326 adapter->link_state = __EDMA_LINKDOWN;
1327 netif_carrier_off(netdev);
1328 netif_tx_stop_all_queues(netdev);
1329 }
1330 }
1331
1332 /* edma_get_stats()
1333 * Statistics API used to retrieve the tx/rx statistics
1334 */
1335 struct net_device_stats *edma_get_stats(struct net_device *netdev)
1336 {
1337 struct edma_adapter *adapter = netdev_priv(netdev);
1338
1339 return &adapter->stats;
1340 }
1341
1342 /* edma_xmit()
1343 * Main api to be called by the core for packet transmission
1344 */
1345 netdev_tx_t edma_xmit(struct sk_buff *skb,
1346 struct net_device *net_dev)
1347 {
1348 struct edma_adapter *adapter = netdev_priv(net_dev);
1349 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1350 struct edma_tx_desc_ring *etdr;
1351 u16 from_cpu, dp_bitmap, txq_id;
1352 int ret, nr_frags = 0, num_tpds_needed = 1, queue_id;
1353 unsigned int flags_transmit = 0;
1354 bool packet_is_rstp = false;
1355 struct netdev_queue *nq = NULL;
1356
1357 if (skb_shinfo(skb)->nr_frags) {
1358 nr_frags = skb_shinfo(skb)->nr_frags;
1359 num_tpds_needed += nr_frags;
1360 } else if (skb_has_frag_list(skb)) {
1361 struct sk_buff *iter_skb;
1362
1363 skb_walk_frags(skb, iter_skb)
1364 num_tpds_needed++;
1365 }
1366
1367 if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) {
1368 dev_err(&net_dev->dev,
1369 "skb received with fragments %d which is more than %lu",
1370 num_tpds_needed, EDMA_MAX_SKB_FRAGS);
1371 dev_kfree_skb_any(skb);
1372 adapter->stats.tx_errors++;
1373 return NETDEV_TX_OK;
1374 }
1375
1376 if (edma_stp_rstp) {
1377 u16 ath_hdr, ath_eth_type;
1378 u8 mac_addr[EDMA_ETH_HDR_LEN];
1379 ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
1380 if (ath_eth_type == edma_ath_eth_type) {
1381 packet_is_rstp = true;
1382 ath_hdr = htons(*(uint16_t *)&skb->data[14]);
1383 dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
1384 from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
1385 memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
1386
1387 skb_pull(skb, 4);
1388
1389 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
1390 }
1391 }
1392
1393 /* this will be one of the 4 TX queues exposed to linux kernel */
1394 txq_id = skb_get_queue_mapping(skb);
1395 queue_id = edma_tx_queue_get(adapter, skb, txq_id);
1396 etdr = edma_cinfo->tpd_ring[queue_id];
1397 nq = netdev_get_tx_queue(net_dev, txq_id);
1398
1399 local_bh_disable();
1400 /* Tx is not handled in bottom half context. Hence, we need to protect
1401 * Tx from tasks and bottom half
1402 */
1403
1404 if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
1405 /* not enough descriptor, just stop queue */
1406 netif_tx_stop_queue(nq);
1407 local_bh_enable();
1408 dev_dbg(&net_dev->dev, "Not enough descriptors available");
1409 edma_cinfo->edma_ethstats.tx_desc_error++;
1410 return NETDEV_TX_BUSY;
1411 }
1412
1413 /* Check and mark VLAN tag offload */
1414 if (!adapter->edma_cinfo->is_single_phy) {
1415 if (unlikely(skb_vlan_tag_present(skb)))
1416 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
1417 else if (adapter->default_vlan_tag)
1418 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
1419 }
1420
1421 /* Check and mark checksum offload */
1422 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
1423 flags_transmit |= EDMA_HW_CHECKSUM;
1424
1425 /* Map and fill descriptor for Tx */
1426 ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
1427 flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags);
1428 if (ret) {
1429 dev_kfree_skb_any(skb);
1430 adapter->stats.tx_errors++;
1431 goto netdev_okay;
1432 }
1433
1434 /* Update SW producer index */
1435 edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
1436
1437 /* update tx statistics */
1438 adapter->stats.tx_packets++;
1439 adapter->stats.tx_bytes += skb->len;
1440
1441 netdev_okay:
1442 local_bh_enable();
1443 return NETDEV_TX_OK;
1444 }
1445
1446 /*
1447 * edma_flow_may_expire()
1448 * Timer function called periodically to expire stale RFS flow entries
1449 */
1450 void edma_flow_may_expire(struct timer_list *t)
1451 {
1452 struct edma_rfs_flow_table *table = from_timer(table, t, expire_rfs);
1453 struct edma_adapter *adapter =
1454 container_of(table, typeof(*adapter), rfs);
1455 int j;
1456
1457 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1458 for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
1459 struct hlist_head *hhead;
1460 struct hlist_node *tmp;
1461 struct edma_rfs_filter_node *n;
1462 bool res;
1463
1464 hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
1465 hlist_for_each_entry_safe(n, tmp, hhead, node) {
1466 res = rps_may_expire_flow(adapter->netdev, n->rq_id,
1467 n->flow_id, n->filter_id);
1468 if (res) {
1469 int ret;
1470 ret = edma_delete_rfs_filter(adapter, n);
1471 if (ret < 0)
1472 dev_dbg(&adapter->netdev->dev,
1473 "RFS entry %d not allowed to be flushed by Switch",
1474 n->flow_id);
1475 else {
1476 hlist_del(&n->node);
1477 kfree(n);
1478 adapter->rfs.filter_available++;
1479 }
1480 }
1481 }
1482 }
1483
1484 adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
1485 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1486 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
1487 }
1488
1489 /* edma_rx_flow_steer()
1490 * Called by the core to steer the flow to a CPU
1491 */
1492 int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1493 u16 rxq, u32 flow_id)
1494 {
1495 struct flow_keys keys;
1496 struct edma_rfs_filter_node *filter_node;
1497 struct edma_adapter *adapter = netdev_priv(dev);
1498 u16 hash_tblid;
1499 int res;
1500
1501 if (skb->protocol == htons(ETH_P_IPV6)) {
1502 dev_err(&adapter->pdev->dev, "IPv6 not supported\n");
1503 res = -EINVAL;
1504 goto no_protocol_err;
1505 }
1506
1507 /* Dissect flow parameters
1508 * We only support IPv4 + TCP/UDP
1509 */
1510 res = skb_flow_dissect_flow_keys(skb, &keys, 0);
1511 if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
1512 res = -EPROTONOSUPPORT;
1513 goto no_protocol_err;
1514 }
1515
1516 /* Check if table entry exists */
1517 hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
1518
1519 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1520 filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
1521
1522 if (filter_node) {
1523 if (rxq == filter_node->rq_id) {
1524 res = -EEXIST;
1525 goto out;
1526 } else {
1527 res = edma_delete_rfs_filter(adapter, filter_node);
1528 if (res < 0)
1529 dev_warn(&adapter->netdev->dev,
1530 "Cannot steer flow %d to different queue",
1531 filter_node->flow_id);
1532 else {
1533 adapter->rfs.filter_available++;
1534 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1535 if (res < 0) {
1536 dev_warn(&adapter->netdev->dev,
1537 "Cannot steer flow %d to different queue",
1538 filter_node->flow_id);
1539 } else {
1540 adapter->rfs.filter_available--;
1541 filter_node->rq_id = rxq;
1542 filter_node->filter_id = res;
1543 }
1544 }
1545 }
1546 } else {
1547 if (adapter->rfs.filter_available == 0) {
1548 res = -EBUSY;
1549 goto out;
1550 }
1551
1552 filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
1553 if (!filter_node) {
1554 res = -ENOMEM;
1555 goto out;
1556 }
1557
1558 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1559 if (res < 0) {
1560 kfree(filter_node);
1561 goto out;
1562 }
1563
1564 adapter->rfs.filter_available--;
1565 filter_node->rq_id = rxq;
1566 filter_node->filter_id = res;
1567 filter_node->flow_id = flow_id;
1568 filter_node->keys = keys;
1569 INIT_HLIST_NODE(&filter_node->node);
1570 hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
1571 }
1572
1573 out:
1574 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1575 no_protocol_err:
1576 return res;
1577 }
1578
1579 /* edma_register_rfs_filter()
1580 * Add RFS filter callback
1581 */
1582 int edma_register_rfs_filter(struct net_device *netdev,
1583 set_rfs_filter_callback_t set_filter)
1584 {
1585 struct edma_adapter *adapter = netdev_priv(netdev);
1586
1587 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1588
1589 if (adapter->set_rfs_rule) {
1590 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1591 return -1;
1592 }
1593
1594 adapter->set_rfs_rule = set_filter;
1595 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1596
1597 return 0;
1598 }
1599
1600 /* edma_alloc_tx_rings()
1601 * Allocate tx rings
1602 */
1603 int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
1604 {
1605 struct platform_device *pdev = edma_cinfo->pdev;
1606 int i, err = 0;
1607
1608 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1609 err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1610 if (err) {
1611 dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
1612 return err;
1613 }
1614 }
1615
1616 return 0;
1617 }
1618
1619 /* edma_free_tx_rings()
1620 * Free tx rings
1621 */
1622 void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
1623 {
1624 int i;
1625
1626 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1627 edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1628 }
1629
1630 /* edma_free_tx_resources()
1631 * Free buffers associated with tx rings
1632 */
1633 void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
1634 {
1635 struct edma_tx_desc_ring *etdr;
1636 struct edma_sw_desc *sw_desc;
1637 struct platform_device *pdev = edma_cinfo->pdev;
1638 int i, j;
1639
1640 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1641 etdr = edma_cinfo->tpd_ring[i];
1642 for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
1643 sw_desc = &etdr->sw_desc[j];
1644 if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
1645 EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
1646 edma_tx_unmap_and_free(pdev, sw_desc);
1647 }
1648 }
1649 }
1650
1651 /* edma_alloc_rx_rings()
1652 * Allocate rx rings
1653 */
1654 int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
1655 {
1656 struct platform_device *pdev = edma_cinfo->pdev;
1657 int i, j, err = 0;
1658
1659 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1660 err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1661 if (err) {
1662 dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
1663 return err;
1664 }
1665 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1666 }
1667
1668 return 0;
1669 }
1670
1671 /* edma_free_rx_rings()
1672 * free rx rings
1673 */
1674 void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
1675 {
1676 int i, j;
1677
1678 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1679 edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1680 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1681 }
1682 }
1683
1684 /* edma_free_queues()
1685 * Free the allocated queues
1686 */
1687 void edma_free_queues(struct edma_common_info *edma_cinfo)
1688 {
1689 int i, j;
1690
1691 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1692 if (edma_cinfo->tpd_ring[i])
1693 kfree(edma_cinfo->tpd_ring[i]);
1694 edma_cinfo->tpd_ring[i] = NULL;
1695 }
1696
1697 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1698 if (edma_cinfo->rfd_ring[j])
1699 kfree(edma_cinfo->rfd_ring[j]);
1700 edma_cinfo->rfd_ring[j] = NULL;
1701 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1702 }
1703
1704 edma_cinfo->num_rx_queues = 0;
1705 edma_cinfo->num_tx_queues = 0;
1706
1707 return;
1708 }
1709
1710 /* edma_free_rx_resources()
1711 * Free buffers associated with rx rings
1712 */
1713 void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
1714 {
1715 struct edma_rfd_desc_ring *erdr;
1716 struct edma_sw_desc *sw_desc;
1717 struct platform_device *pdev = edma_cinfo->pdev;
1718 int i, j, k;
1719
1720 for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
1721 erdr = edma_cinfo->rfd_ring[k];
1722 for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
1723 sw_desc = &erdr->sw_desc[j];
1724 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
1725 dma_unmap_single(&pdev->dev, sw_desc->dma,
1726 sw_desc->length, DMA_FROM_DEVICE);
1727 edma_clean_rfd(erdr, j);
1728 } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
1729 dma_unmap_page(&pdev->dev, sw_desc->dma,
1730 sw_desc->length, DMA_FROM_DEVICE);
1731 edma_clean_rfd(erdr, j);
1732 }
1733 }
1734 k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1735
1736 }
1737 }
1738
1739 /* edma_alloc_queues_tx()
1740 * Allocate memory for all tx descriptor rings
1741 */
1742 int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
1743 {
1744 int i;
1745
1746 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1747 struct edma_tx_desc_ring *etdr;
1748 etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
1749 if (!etdr)
1750 goto err;
1751 etdr->count = edma_cinfo->tx_ring_count;
1752 edma_cinfo->tpd_ring[i] = etdr;
1753 }
1754
1755 return 0;
1756 err:
1757 edma_free_queues(edma_cinfo);
1758 return -1;
1759 }
1760
1761 /* edma_alloc_queues_rx()
1762 * Allocate memory for all rx descriptor rings
1763 */
1764 int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
1765 {
1766 int i, j;
1767
1768 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1769 struct edma_rfd_desc_ring *rfd_ring;
1770 rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
1771 GFP_KERNEL);
1772 if (!rfd_ring)
1773 goto err;
1774 rfd_ring->count = edma_cinfo->rx_ring_count;
1775 edma_cinfo->rfd_ring[j] = rfd_ring;
1776 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1777 }
1778 return 0;
1779 err:
1780 edma_free_queues(edma_cinfo);
1781 return -1;
1782 }
1783
1784 /* edma_clear_irq_status()
1785 * Clear interrupt status
1786 */
1787 void edma_clear_irq_status(void)
1788 {
1789 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
1790 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
1791 edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
1792 edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
1793 }
1794
1795 /* edma_configure()
1796 * Configure skb, edma interrupts and control register.
1797 */
1798 int edma_configure(struct edma_common_info *edma_cinfo)
1799 {
1800 struct edma_hw *hw = &edma_cinfo->hw;
1801 u32 intr_modrt_data;
1802 u32 intr_ctrl_data = 0;
1803 int i, j, ret_count;
1804
1805 edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
1806 intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
1807 intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
1808 edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
1809
1810 edma_clear_irq_status();
1811
1812 /* Clear any WOL status */
1813 edma_write_reg(EDMA_REG_WOL_CTRL, 0);
1814 intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
1815 intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
1816 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
1817 edma_configure_tx(edma_cinfo);
1818 edma_configure_rx(edma_cinfo);
1819
1820 /* Allocate the RX buffer */
1821 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1822 struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
1823 ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
1824 if (ret_count) {
1825 dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
1826 }
1827 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1828 }
1829
1830 /* Configure descriptor Ring */
1831 edma_init_desc(edma_cinfo);
1832 return 0;
1833 }
1834
1835 /* edma_irq_enable()
1836 * Enable default interrupt generation settings
1837 */
1838 void edma_irq_enable(struct edma_common_info *edma_cinfo)
1839 {
1840 struct edma_hw *hw = &edma_cinfo->hw;
1841 int i, j;
1842
1843 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
1844 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1845 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
1846 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1847 }
1848 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
1849 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1850 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
1851 }
1852
1853 /* edma_irq_disable()
1854 * Disable Interrupt
1855 */
1856 void edma_irq_disable(struct edma_common_info *edma_cinfo)
1857 {
1858 int i;
1859
1860 for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
1861 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);
1862
1863 for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
1864 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
1865 edma_write_reg(EDMA_REG_MISC_IMR, 0);
1866 edma_write_reg(EDMA_REG_WOL_IMR, 0);
1867 }
1868
1869 /* edma_free_irqs()
1870 * Free All IRQs
1871 */
1872 void edma_free_irqs(struct edma_adapter *adapter)
1873 {
1874 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1875 int i, j;
1876 int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);
1877
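/* Each core owns four consecutive tx IRQs starting at tx_start, and k
 * rx IRQs starting at rx_start: one per core when four rx queues are
 * configured, otherwise two.
 */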
1878 for (i = 0; i < CONFIG_NR_CPUS; i++) {
1879 for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
1880 free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);
1881
1882 for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
1883 free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
1884 }
1885 }
1886
1887 /* edma_enable_rx_ctrl()
1888 * Enable RX queue control
1889 */
1890 void edma_enable_rx_ctrl(struct edma_hw *hw)
1891 {
1892 u32 data;
1893
1894 edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
1895 data |= EDMA_RXQ_CTRL_EN;
1896 edma_write_reg(EDMA_REG_RXQ_CTRL, data);
1897 }
1898
1899
1900 /* edma_enable_tx_ctrl()
1901 * Enable TX queue control
1902 */
1903 void edma_enable_tx_ctrl(struct edma_hw *hw)
1904 {
1905 u32 data;
1906
1907 edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
1908 data |= EDMA_TXQ_CTRL_TXQ_EN;
1909 edma_write_reg(EDMA_REG_TXQ_CTRL, data);
1910 }
1911
1912 /* edma_stop_rx_tx()
1913 * Disable RX/TX queue control
1914 */
1915 void edma_stop_rx_tx(struct edma_hw *hw)
1916 {
1917 u32 data;
1918
1919 edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
1920 data &= ~EDMA_RXQ_CTRL_EN;
1921 edma_write_reg(EDMA_REG_RXQ_CTRL, data);
1922 edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
1923 data &= ~EDMA_TXQ_CTRL_TXQ_EN;
1924 edma_write_reg(EDMA_REG_TXQ_CTRL, data);
1925 }
1926
1927 /* edma_reset()
1928 * Reset the EDMA
1929 */
1930 int edma_reset(struct edma_common_info *edma_cinfo)
1931 {
1932 struct edma_hw *hw = &edma_cinfo->hw;
1933
1934 edma_irq_disable(edma_cinfo);
1935
1936 edma_clear_irq_status();
1937
1938 edma_stop_rx_tx(hw);
1939
1940 return 0;
1941 }
1942
1943 /* edma_fill_netdev()
1944 * Fill netdev for each etdr
1945 */
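/* A minimal usage sketch (hypothetical indices), e.g. from probe/setup
 * code, mapping one of a netdev's tx queues onto a tpd ring:
 *
 *	if (edma_fill_netdev(edma_cinfo, queue_id, dev_idx, txq_id) < 0)
 *		return -ENOSPC;	(no free netdev slot on this tpd ring)
 *
 * The stored netdev/nq pair is presumably what tx completion later uses
 * to wake the correct software tx queue.
 */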
1946 int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
1947 int dev, int txq_id)
1948 {
1949 struct edma_tx_desc_ring *etdr;
1950 int i = 0;
1951
1952 etdr = edma_cinfo->tpd_ring[queue_id];
1953
1954 while (i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->netdev[i])
1955 i++;
1956
1957 if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
1958 return -1;
1959
1960 /* Populate the netdev associated with the tpd ring */
1961 etdr->netdev[i] = edma_netdev[dev];
1962 etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);
1963
1964 return 0;
1965 }
1966
1967 /* edma_set_mac_addr()
1968 * Change the Ethernet Address of the NIC
1969 */
1970 int edma_set_mac_addr(struct net_device *netdev, void *p)
1971 {
1972 struct sockaddr *addr = p;
1973
1974 if (!is_valid_ether_addr(addr->sa_data))
1975 return -EINVAL;
1976
1977 if (netif_running(netdev))
1978 return -EBUSY;
1979
1980 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1981 return 0;
1982 }
1983
1984 /* edma_set_stp_rstp()
1985 * set stp/rstp
1986 */
1987 void edma_set_stp_rstp(bool rstp)
1988 {
1989 edma_stp_rstp = rstp;
1990 }
1991
1992 /* edma_assign_ath_hdr_type()
1993 * assign atheros header eth type
1994 */
1995 void edma_assign_ath_hdr_type(int eth_type)
1996 {
1997 edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
1998 }
1999
2000 /* edma_get_default_vlan_tag()
2001 * Used by other modules to get the default vlan tag
2002 */
2003 int edma_get_default_vlan_tag(struct net_device *netdev)
2004 {
2005 struct edma_adapter *adapter = netdev_priv(netdev);
2006
2007 if (adapter->default_vlan_tag)
2008 return adapter->default_vlan_tag;
2009
2010 return 0;
2011 }
2012
2013 /* edma_open()
2014 * gets called when netdevice is up, start the queue.
2015 */
2016 int edma_open(struct net_device *netdev)
2017 {
2018 struct edma_adapter *adapter = netdev_priv(netdev);
2019 struct platform_device *pdev = adapter->edma_cinfo->pdev;
2020
2021 netif_tx_start_all_queues(netdev);
2022 edma_initialise_rfs_flow_table(adapter);
2023 set_bit(__EDMA_UP, &adapter->state_flags);
2024
2025 /* If link polling is enabled (in our case for the WAN port),
2026 * start the PHY; otherwise unconditionally mark the link as up.
2027 */
2028 if (adapter->poll_required) {
2029 if (!IS_ERR(adapter->phydev)) {
2030 phy_start(adapter->phydev);
2031 phy_start_aneg(adapter->phydev);
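/* Leave the link marked down for now; it is presumably raised later by
 * the driver's PHY link-change handler once autonegotiation completes.
 */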
2032 adapter->link_state = __EDMA_LINKDOWN;
2033 } else {
2034 dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
2035 }
2036 } else {
2037 adapter->link_state = __EDMA_LINKUP;
2038 netif_carrier_on(netdev);
2039 }
2040
2041 return 0;
2042 }
2043
2044
2045 /* edma_close()
2046 * gets called when netdevice is down, stops the queue.
2047 */
2048 int edma_close(struct net_device *netdev)
2049 {
2050 struct edma_adapter *adapter = netdev_priv(netdev);
2051
2052 edma_free_rfs_flow_table(adapter);
2053 netif_carrier_off(netdev);
2054 netif_tx_stop_all_queues(netdev);
2055
2056 if (adapter->poll_required) {
2057 if (!IS_ERR(adapter->phydev))
2058 phy_stop(adapter->phydev);
2059 }
2060
2061 adapter->link_state = __EDMA_LINKDOWN;
2062
2063 /* Clear the __EDMA_UP flag now that the interface is down
2064 */
2065 clear_bit(__EDMA_UP, &adapter->state_flags);
2066
2067 return 0;
2068 }
2069
2070 /* edma_poll()
2071 * polling function that gets called when the napi gets scheduled.
2072 *
2073 * Main sequence of tasks performed by this function is:
2074 * read irq status -> clean tx -> clean rx -> clear irq status ->
2075 * re-enable interrupts.
2076 */
2077 int edma_poll(struct napi_struct *napi, int budget)
2078 {
2079 struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
2080 struct edma_per_cpu_queues_info, napi);
2081 struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2082 u32 reg_data;
2083 u32 shadow_rx_status, shadow_tx_status;
2084 int queue_id;
2085 int i, work_done = 0;
2086 u16 rx_pending_fill;
2087
2088 /* Store the Rx/Tx status by ANDing it with
2089 * appropriate CPU RX/TX mask
2090 */
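/* The shadow copies taken below record exactly which status bits were
 * observed in this pass; assuming write-1-to-clear semantics, writing
 * them back at the end of the poll clears only those bits and leaves
 * anything raised in the meantime pending.
 */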
2091 edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
2092 edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
2093 shadow_rx_status = edma_percpu_info->rx_status;
2094 edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
2095 edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
2096 shadow_tx_status = edma_percpu_info->tx_status;
2097
2098 /* Every core owns a range of tx queues, starting at the
2099 * tx_start value computed in probe and stored in
2100 * edma_percpu_info->tx_start. The status bits masked above
2101 * belong to the core on which the current processing is
2102 * happening; since there are 4 tx queues per core, we loop
2103 * until every pending tx queue has been completed.
2104 */
2105 while (edma_percpu_info->tx_status) {
2106 queue_id = ffs(edma_percpu_info->tx_status) - 1;
2107 edma_tx_complete(edma_cinfo, queue_id);
2108 edma_percpu_info->tx_status &= ~(1 << queue_id);
2109 }
2110
2111 /* Likewise, every core has an rx_start, which is computed in
2112 * probe and stored in edma_percpu_info->rx_start. The status
2113 * bits masked above identify the rx queues owned by the core
2114 * on which the current processing is happening, so we keep
2115 * looping until every pending rx queue has been serviced or
2116 * the budget is exhausted.
2117 */
2118 while (edma_percpu_info->rx_status) {
2119 queue_id = ffs(edma_percpu_info->rx_status) - 1;
2120 rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
2121 budget, queue_id, napi);
2122
2123 if (likely(work_done < budget)) {
2124 if (rx_pending_fill) {
2125 /* reschedule poll() to refill rx buffer deficit */
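/* Reporting the full budget skips napi_complete() below, so NAPI
 * polls again and the ring refill can be retried before the
 * interrupts are re-enabled.
 */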
2126 work_done = budget;
2127 break;
2128 }
2129 edma_percpu_info->rx_status &= ~(1 << queue_id);
2130 } else {
2131 break;
2132 }
2133 }
2134
2135 /* Clear the status registers so that the interrupts do not
2136 * recur. The clearing is done only at this point because the
2137 * write must happen after the producer/consumer indices have
2138 * been updated to reflect that the packet transmission and
2139 * reception completed successfully.
2140 */
2141 edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
2142 edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
2143
2144 /* If budget not fully consumed, exit the polling mode */
2145 if (likely(work_done < budget)) {
2146 napi_complete(napi);
2147
2148 /* re-enable the interrupts */
2149 for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2150 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
2151 for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2152 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
2153 }
2154
2155 return work_done;
2156 }
2157
2158 /* edma_interrupt()
2159 * interrupt handler
2160 */
2161 irqreturn_t edma_interrupt(int irq, void *dev)
2162 {
2163 struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
2164 struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2165 int i;
2166
2167 /* Mask the per-queue TX/RX interrupts before scheduling NAPI */
2168 for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2169 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
2170
2171 for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2172 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);
2173
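/* Standard NAPI hand-off: with the per-queue interrupts masked above,
 * schedule the softirq poll; edma_poll() re-enables them when it
 * completes under budget.
 */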
2174 napi_schedule(&edma_percpu_info->napi);
2175
2176 return IRQ_HANDLED;
2177 }