/* Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include "ess_edma.h"	/* driver-local ESS register definitions */
#include "edma.h"	/* driver-local ring/adapter structures and register helpers */
extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
bool edma_stp_rstp;
u16 edma_ath_eth_type;
/* edma_skb_priority_offset()
 *	get edma skb priority
 */
static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
{
	return (skb->priority >> 2) & 1;
}
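/* Note on edma_skb_priority_offset(): "(skb->priority >> 2) & 1" evaluates to
 * 0 for priorities 0-3 and 1 for priorities 4-7 (higher values wrap), so each
 * port effectively selects between two Tx queue offsets per priority band;
 * see edma_tx_queue_get() below for how the offset is applied.
 */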
/* edma_alloc_tx_ring()
 *	Allocate Tx descriptors ring
 */
static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
			      struct edma_tx_desc_ring *etdr)
{
	struct platform_device *pdev = edma_cinfo->pdev;

	/* Initialize ring bookkeeping */
	etdr->size = sizeof(struct edma_sw_desc) * etdr->count;
	etdr->sw_next_to_fill = 0;
	etdr->sw_next_to_clean = 0;

	/* Allocate SW descriptors */
	etdr->sw_desc = vzalloc(etdr->size);
	if (!etdr->sw_desc) {
		dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
		return -ENOMEM;
	}

	/* Allocate HW descriptors */
	etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
					   GFP_KERNEL);
	if (!etdr->hw_desc) {
		dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
		vfree(etdr->sw_desc);
		return -ENOMEM;
	}

	return 0;
}
/* edma_free_tx_ring()
 *	Free tx rings allocated by edma_alloc_tx_rings
 */
static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
			      struct edma_tx_desc_ring *etdr)
{
	struct platform_device *pdev = edma_cinfo->pdev;

	if (likely(etdr->dma))
		dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
				  etdr->dma);

	vfree(etdr->sw_desc);
	etdr->sw_desc = NULL;
}
/* edma_alloc_rx_ring()
 *	allocate rx descriptor ring
 */
static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
			      struct edma_rfd_desc_ring *erxd)
{
	struct platform_device *pdev = edma_cinfo->pdev;

	erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
	erxd->sw_next_to_fill = 0;
	erxd->sw_next_to_clean = 0;

	/* Allocate SW descriptors */
	erxd->sw_desc = vzalloc(erxd->size);
	if (!erxd->sw_desc)
		return -ENOMEM;

	/* Alloc HW descriptors */
	erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
					   GFP_KERNEL);
	if (!erxd->hw_desc) {
		vfree(erxd->sw_desc);
		return -ENOMEM;
	}

	/* Initialize pending_fill */
	erxd->pending_fill = 0;

	return 0;
}
/* edma_free_rx_ring()
 *	Free rx ring allocated by alloc_rx_ring
 */
static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
			      struct edma_rfd_desc_ring *rxdr)
{
	struct platform_device *pdev = edma_cinfo->pdev;

	if (likely(rxdr->dma))
		dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc,
				  rxdr->dma);

	vfree(rxdr->sw_desc);
	rxdr->sw_desc = NULL;
}
/* edma_configure_tx()
 *	Configure transmission control data
 */
static void edma_configure_tx(struct edma_common_info *edma_cinfo)
{
	u32 txq_ctrl_data;

	txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
	txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
	txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
	edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
}
/* edma_configure_rx()
 *	configure reception control data
 */
static void edma_configure_rx(struct edma_common_info *edma_cinfo)
{
	struct edma_hw *hw = &edma_cinfo->hw;
	u32 rss_type, rx_desc1, rxq_ctrl_data;

	/* Set RSS type */
	rss_type = hw->rss_type;
	edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);

	/* Set RFD burst number */
	rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);

	/* Set RFD prefetch threshold */
	rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);

	/* Set RFD in host ring low threshold to generate interrupt */
	rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
	edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);

	/* Set Rx FIFO threshold to start to DMA data to host */
	rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;

	if (!edma_cinfo->is_single_phy) {
		/* Set RX remove vlan bit */
		rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
	}

	edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
}
/* edma_alloc_rx_buf()
 *	does skb allocation for the received packets.
 */
static int edma_alloc_rx_buf(struct edma_common_info *edma_cinfo,
			     struct edma_rfd_desc_ring *erdr,
			     int cleaned_count, int queue_id)
{
	struct platform_device *pdev = edma_cinfo->pdev;
	struct edma_rx_free_desc *rx_desc;
	struct edma_sw_desc *sw_desc;
	struct sk_buff *skb;
	unsigned int i;
	u16 prod_idx, length;
	u32 reg_data;

	if (cleaned_count > erdr->count)
		cleaned_count = erdr->count - 1;

	i = erdr->sw_next_to_fill;

	while (cleaned_count) {
		sw_desc = &erdr->sw_desc[i];
		length = edma_cinfo->rx_head_buffer_len;

		if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
			skb = sw_desc->skb;

			/* Clear REUSE Flag */
			sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
		} else {
			/* allocate a fresh skb */
			skb = netdev_alloc_skb_ip_align(edma_netdev[0], length);
			if (!skb) {
				/* Better luck next round */
				break;
			}

			if (edma_cinfo->page_mode) {
				struct page *pg = alloc_page(GFP_ATOMIC);

				if (!pg) {
					dev_kfree_skb_any(skb);
					break;
				}

				sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
							    edma_cinfo->rx_page_buffer_len,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      sw_desc->dma)) {
					__free_page(pg);
					dev_kfree_skb_any(skb);
					break;
				}

				skb_fill_page_desc(skb, 0, pg, 0,
						   edma_cinfo->rx_page_buffer_len);
				sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
				sw_desc->length = edma_cinfo->rx_page_buffer_len;
			} else {
				sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
							      length, DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      sw_desc->dma)) {
					dev_kfree_skb_any(skb);
					break;
				}

				sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
				sw_desc->length = length;
			}

			sw_desc->skb = skb;
		}

		/* Update the buffer info */
		rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]);
		rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
		if (++i == erdr->count)
			i = 0;
		cleaned_count--;
	}

	erdr->sw_next_to_fill = i;

	if (i == 0)
		prod_idx = erdr->count - 1;
	else
		prod_idx = i - 1;

	/* Update the producer index */
	edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
	reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
	reg_data |= prod_idx;
	edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);

	/* If we couldn't allocate all the buffers,
	 * we increment the alloc failure counters
	 */
	if (cleaned_count)
		edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;

	return cleaned_count;
}
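/* Note on edma_alloc_rx_buf(): the fill index wraps with "++i == erdr->count"
 * while the clean paths elsewhere wrap with "& (count - 1)", which assumes a
 * power-of-two ring size. The return value is the number of buffers that
 * could not be allocated; callers stash it in erdr->pending_fill and retry
 * the refill from the NAPI poll path.
 */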
/* edma_init_desc()
 *	update descriptor ring size, buffer and producer/consumer index
 */
static void edma_init_desc(struct edma_common_info *edma_cinfo)
{
	struct edma_rfd_desc_ring *rfd_ring;
	struct edma_tx_desc_ring *etdr;
	int i, j;
	u32 data = 0;
	u16 hw_cons_idx = 0;

	/* Set the base address of every TPD ring. */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		etdr = edma_cinfo->tpd_ring[i];

		/* Update descriptor ring base address */
		edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
		edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);

		/* Calculate hardware consumer index */
		hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
		etdr->sw_next_to_fill = hw_cons_idx;
		etdr->sw_next_to_clean = hw_cons_idx;
		data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
		data |= hw_cons_idx;

		/* update producer index */
		edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);

		/* update SW consumer index register */
		edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);

		/* Set TPD ring size */
		edma_write_reg(EDMA_REG_TPD_RING_SIZE,
			       edma_cinfo->tx_ring_count &
			       EDMA_TPD_RING_SIZE_MASK);
	}

	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
		rfd_ring = edma_cinfo->rfd_ring[j];
		/* Update Receive Free descriptor ring base address */
		edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
			       (u32)(rfd_ring->dma));
		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
	}

	data = edma_cinfo->rx_head_buffer_len;
	if (edma_cinfo->page_mode)
		data = edma_cinfo->rx_page_buffer_len;

	data &= EDMA_RX_BUF_SIZE_MASK;
	data <<= EDMA_RX_BUF_SIZE_SHIFT;

	/* Update RFD ring size and RX buffer size */
	data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
		 << EDMA_RFD_RING_SIZE_SHIFT;

	edma_write_reg(EDMA_REG_RX_DESC0, data);

	/* Disable TX FIFO low watermark and high watermark */
	edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);

	/* Load all of base address above */
	edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
	data |= 1 << EDMA_LOAD_PTR_SHIFT;
	edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
}
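/* Note on RFD queue numbering: when four Rx queues are used, only every other
 * hardware queue index is populated (j advances by 2), so the driver's queue
 * IDs map to hardware queues 0, 2, 4 and 6. The same stepping is repeated in
 * the allocation, free and IRQ-mask loops below.
 */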
/* edma_receive_checksum
 *	Api to check checksum on receive packets
 */
static void edma_receive_checksum(struct edma_rx_return_desc *rd,
				  struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* check the RRD IP/L4 checksum bit to see if
	 * its set, which in turn indicates checksum
	 * failure.
	 */
	if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/* edma_clean_rfd()
 *	clean up rx resources on error
 */
static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
{
	struct edma_rx_free_desc *rx_desc;
	struct edma_sw_desc *sw_desc;

	rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
	sw_desc = &erdr->sw_desc[index];

	dev_kfree_skb_any(sw_desc->skb);
	sw_desc->skb = NULL;

	memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
}
/* edma_rx_complete_stp_rstp()
 *	Complete Rx processing for STP/RSTP packets
 */
static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
{
	int i;
	u16 port_type;
	u16 priority;
	u8 mac_addr[EDMA_ETH_HDR_LEN];

	port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
			& EDMA_RRD_PORT_TYPE_MASK;
	/* if port type is 0x4, then only proceed with
	 * other stp/rstp calculation
	 */
	if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
		u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};

		/* calculate the frame priority */
		priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
			& EDMA_RRD_PRIORITY_MASK;

		for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
			mac_addr[i] = skb->data[i];

		/* Check if destination mac addr is bpdu addr */
		if (!memcmp(mac_addr, bpdu_mac, 6)) {
			/* destination mac address is BPDU
			 * destination mac address, then add
			 * atheros header to the packet.
			 */
			u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
				(priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
				(EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
			skb_push(skb, 4);
			memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
			*(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
			*(uint16_t *)&skb->data[14] = htons(athr_hdr);
		}
	}
}
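/* Note on the Atheros header layout used above: after the 12 bytes of MAC
 * addresses, bytes 12-13 carry edma_ath_eth_type and bytes 14-15 carry the
 * constructed header word (version, priority, port type and source port),
 * which is what edma_xmit() parses back out when edma_stp_rstp is set.
 */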
/* edma_rx_complete_fraglist()
 *	Complete Rx processing for fraglist skbs
 */
static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
				     u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
{
	struct platform_device *pdev = edma_cinfo->pdev;
	struct edma_hw *hw = &edma_cinfo->hw;
	struct sk_buff *skb_temp;
	struct edma_sw_desc *sw_desc;
	struct sk_buff *skb_prev = NULL;
	int i;
	u16 size_remaining;

	skb->tail += (hw->rx_head_buff_size - 16);
	skb->len = skb->truesize = length;
	size_remaining = length - (hw->rx_head_buff_size - 16);

	/* clean-up all related sw_descs */
	for (i = 1; i < num_rfds; i++) {
		sw_desc = &erdr->sw_desc[sw_next_to_clean];
		skb_temp = sw_desc->skb;

		dma_unmap_single(&pdev->dev, sw_desc->dma,
				 sw_desc->length, DMA_FROM_DEVICE);

		if (size_remaining < hw->rx_head_buff_size)
			skb_put(skb_temp, size_remaining);
		else
			skb_put(skb_temp, hw->rx_head_buff_size);

		/* If we are processing the first rfd, we link
		 * skb->frag_list to the skb corresponding to the
		 * first fragment
		 */
		if (i == 1)
			skb_shinfo(skb)->frag_list = skb_temp;
		else
			skb_prev->next = skb_temp;
		skb_prev = skb_temp;
		skb_temp->next = NULL;

		skb->data_len += skb_temp->len;
		size_remaining -= skb_temp->len;

		/* Increment SW index */
		sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
		(*cleaned_count)++;
	}

	return sw_next_to_clean;
}
/* edma_rx_complete_paged()
 *	Complete Rx processing for paged skbs
 */
static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
				  u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
{
	struct platform_device *pdev = edma_cinfo->pdev;
	struct sk_buff *skb_temp;
	struct edma_sw_desc *sw_desc;
	int i;
	u16 size_remaining;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

	/* Setup skbuff fields */
	skb->len = length;

	if (likely(num_rfds <= 1)) {
		skb->data_len = length;
		skb->truesize += edma_cinfo->rx_page_buffer_len;
		skb_fill_page_desc(skb, 0, skb_frag_page(frag),
				   16, length);
	} else {
		skb_frag_size_sub(frag, 16);
		skb->data_len = skb_frag_size(frag);
		skb->truesize += edma_cinfo->rx_page_buffer_len;
		size_remaining = length - skb_frag_size(frag);

		skb_fill_page_desc(skb, 0, skb_frag_page(frag),
				   16, skb_frag_size(frag));

		/* clean-up all related sw_descs */
		for (i = 1; i < num_rfds; i++) {
			sw_desc = &erdr->sw_desc[sw_next_to_clean];
			skb_temp = sw_desc->skb;
			frag = &skb_shinfo(skb_temp)->frags[0];
			dma_unmap_page(&pdev->dev, sw_desc->dma,
				       sw_desc->length, DMA_FROM_DEVICE);

			if (size_remaining < edma_cinfo->rx_page_buffer_len)
				skb_frag_size_set(frag, size_remaining);

			skb_fill_page_desc(skb, i, skb_frag_page(frag),
					   0, skb_frag_size(frag));

			skb_shinfo(skb_temp)->nr_frags = 0;
			dev_kfree_skb_any(skb_temp);

			skb->data_len += skb_frag_size(frag);
			skb->truesize += edma_cinfo->rx_page_buffer_len;
			size_remaining -= skb_frag_size(frag);

			/* Increment SW index */
			sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
			(*cleaned_count)++;
		}
	}

	return sw_next_to_clean;
}
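/* Note on the offset of 16 used in the paged and fraglist completion paths:
 * the first 16 bytes of the first buffer hold the Rx return descriptor (RRD),
 * so the payload starts 16 bytes in and the first fragment is shrunk (or the
 * head skb_reserve()d) accordingly.
 */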
/* edma_rx_complete()
 *	Main api called from the poll function to process rx packets.
 */
static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
			    int *work_done, int work_to_do, int queue_id,
			    struct napi_struct *napi)
{
	struct platform_device *pdev = edma_cinfo->pdev;
	struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
	struct net_device *netdev;
	struct edma_adapter *adapter;
	struct edma_sw_desc *sw_desc;
	struct sk_buff *skb;
	struct edma_rx_return_desc *rd;
	u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
	    sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
	u32 data = 0;
	u32 priority;
	u8 *vaddr;
	int port_id, i, drop_count = 0;
	u16 count = erdr->count, rfd_avail;
	u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};

	cleaned_count = erdr->pending_fill;
	sw_next_to_clean = erdr->sw_next_to_clean;

	edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
	hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
			   EDMA_RFD_CONS_IDX_MASK;

	do {
		while (sw_next_to_clean != hw_next_to_clean) {
			if (!work_to_do)
				break;

			sw_desc = &erdr->sw_desc[sw_next_to_clean];
			skb = sw_desc->skb;

			/* Unmap the allocated buffer */
			if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
				dma_unmap_single(&pdev->dev, sw_desc->dma,
						 sw_desc->length, DMA_FROM_DEVICE);
			else
				dma_unmap_page(&pdev->dev, sw_desc->dma,
					       sw_desc->length, DMA_FROM_DEVICE);

			/* Fetch the RRD, which sits in the first 16 bytes of the buffer */
			if (edma_cinfo->page_mode) {
				vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
				memcpy((uint8_t *)&rrd[0], vaddr, 16);
				rd = (struct edma_rx_return_desc *)rrd;
				kunmap_atomic(vaddr);
			} else {
				rd = (struct edma_rx_return_desc *)skb->data;
			}

			/* Check if RRD is valid */
			if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
				edma_clean_rfd(erdr, sw_next_to_clean);
				sw_next_to_clean = (sw_next_to_clean + 1) &
						   (erdr->count - 1);
				cleaned_count++;
				continue;
			}

			/* Get the number of RFDs from RRD */
			num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;

			/* Get Rx port ID from switch */
			port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
			if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
				dev_err(&pdev->dev, "Invalid RRD source port bit set");
				for (i = 0; i < num_rfds; i++) {
					edma_clean_rfd(erdr, sw_next_to_clean);
					sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
					cleaned_count++;
				}
				continue;
			}

			/* check if we have a sink for the data we receive.
			 * If the interface isn't setup, we have to drop the
			 * incoming data for now.
			 */
			netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
			if (!netdev) {
				edma_clean_rfd(erdr, sw_next_to_clean);
				sw_next_to_clean = (sw_next_to_clean + 1) &
						   (erdr->count - 1);
				cleaned_count++;
				continue;
			}
			adapter = netdev_priv(netdev);

			/* This code is added to handle a usecase where high
			 * priority stream and a low priority stream are
			 * received simultaneously on DUT. The problem occurs
			 * if one of the Rx rings is full and the corresponding
			 * core is busy with other stuff. This causes ESS CPU
			 * port to backpressure all incoming traffic including
			 * high priority one. We monitor free descriptor count
			 * on each CPU and whenever it reaches threshold (< 80),
			 * we drop all low priority traffic and let only high
			 * priority traffic pass through. We can hence avoid
			 * ESS CPU port to send backpressure on high priority
			 * stream.
			 */
			priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
				   & EDMA_RRD_PRIORITY_MASK;
			if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
				rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
				if (rfd_avail < EDMA_RFD_AVAIL_THR) {
					sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
					sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
					adapter->stats.rx_dropped++;
					cleaned_count++;
					drop_count++;
					if (drop_count == 3) {
						work_to_do--;
						(*work_done)++;
						drop_count = 0;
					}
					if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
						/* If buffer clean count reaches 16, we replenish HW buffers. */
						ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
						edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
							       sw_next_to_clean);
						cleaned_count = ret_count;
						erdr->pending_fill = ret_count;
					}
					continue;
				}
			}

			work_to_do--;
			(*work_done)++;

			/* Increment SW index */
			sw_next_to_clean = (sw_next_to_clean + 1) &
					   (erdr->count - 1);
			cleaned_count++;

			/* Get the packet size and allocate buffer */
			length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;

			if (edma_cinfo->page_mode) {
				/* paged skb */
				sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
				if (!pskb_may_pull(skb, ETH_HLEN)) {
					dev_kfree_skb_any(skb);
					continue;
				}
			} else {
				/* single or fraglist skb */

				/* Addition of 16 bytes is required, as in the packet
				 * first 16 bytes are rrd descriptors, so actual data
				 * starts from an offset of 16.
				 */
				skb_reserve(skb, 16);
				if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode))
					skb_put(skb, length);
				else
					sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
			}

			if (edma_stp_rstp)
				edma_rx_complete_stp_rstp(skb, port_id, rd);

			skb->protocol = eth_type_trans(skb, netdev);

			/* Record Rx queue for RFS/RPS and fill flow hash from HW */
			skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
			if (netdev->features & NETIF_F_RXHASH) {
				hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
				if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
					skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
			}

#ifdef CONFIG_NF_FLOW_COOKIE
			skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
#endif
			edma_receive_checksum(rd, skb);

			/* Process VLAN HW acceleration indication provided by HW */
			if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
				vlan = rd->rrd4;
				if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
					__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
				else if (rd->rrd1 & EDMA_RRD_SVLAN)
					__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
			}

			/* Update rx statistics */
			adapter->stats.rx_packets++;
			adapter->stats.rx_bytes += length;

			/* Check if we reached refill threshold */
			if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
				ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
				edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
					       sw_next_to_clean);
				cleaned_count = ret_count;
				erdr->pending_fill = ret_count;
			}

			/* At this point skb should go to stack */
			napi_gro_receive(napi, skb);
		}

		/* Check if we still have NAPI budget */
		if (!work_to_do)
			break;

		/* Read index once again since we still have NAPI budget */
		edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
		hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
				   EDMA_RFD_CONS_IDX_MASK;
	} while (hw_next_to_clean != sw_next_to_clean);

	erdr->sw_next_to_clean = sw_next_to_clean;

	/* Refill here in case refill threshold wasn't reached */
	if (likely(cleaned_count)) {
		ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
		erdr->pending_fill = ret_count;
		if (ret_count)
			dev_dbg(&pdev->dev, "Not all buffers were reallocated");

		edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
			       erdr->sw_next_to_clean);
	}

	return erdr->pending_fill;
}
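/* Note on the return value of edma_rx_complete(): it is the outstanding
 * buffer deficit (erdr->pending_fill). edma_poll() treats a non-zero value
 * as a reason to keep NAPI scheduled so the refill can be retried even when
 * the budget was not exhausted.
 */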
/* edma_delete_rfs_filter()
 *	Remove RFS filter from switch
 */
static int edma_delete_rfs_filter(struct edma_adapter *adapter,
				  struct edma_rfs_filter_node *filter_node)
{
	int res = -1;
	struct flow_keys *keys = &filter_node->keys;

	if (likely(adapter->set_rfs_rule))
		res = (*adapter->set_rfs_rule)(adapter->netdev,
					       flow_get_u32_src(keys), flow_get_u32_dst(keys),
					       keys->ports.src, keys->ports.dst,
					       keys->basic.ip_proto, filter_node->rq_id, 0);

	return res;
}
/* edma_add_rfs_filter()
 *	Add RFS filter to switch
 */
static int edma_add_rfs_filter(struct edma_adapter *adapter,
			       struct flow_keys *keys, u16 rq,
			       struct edma_rfs_filter_node *filter_node)
{
	int res = -1;
	struct flow_keys *dest_keys = &filter_node->keys;

	/* Copy the dissected flow fields into the filter node */
	dest_keys->control = keys->control;
	dest_keys->basic = keys->basic;
	dest_keys->addrs = keys->addrs;
	dest_keys->ports = keys->ports;

	/* Call callback registered by ESS driver */
	if (likely(adapter->set_rfs_rule))
		res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
					       flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst,
					       keys->basic.ip_proto, rq, 1);

	return res;
}
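/* Note on the set_rfs_rule callback used above: it is invoked as
 * (netdev, src_ip, dst_ip, src_port, dst_port, ip_proto, rq_id, add),
 * with add = 1 to install a rule and add = 0 (see edma_delete_rfs_filter())
 * to remove it.
 */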
/* edma_rfs_key_search()
 *	Look for existing RFS entry
 */
static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
							 struct flow_keys *key)
{
	struct edma_rfs_filter_node *p;

	hlist_for_each_entry(p, h, node)
		if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) &&
		    flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) &&
		    p->keys.ports.src == key->ports.src &&
		    p->keys.ports.dst == key->ports.dst &&
		    p->keys.basic.ip_proto == key->basic.ip_proto)
			return p;

	return NULL;
}
/* edma_initialise_rfs_flow_table()
 *	Initialise EDMA RFS flow table
 */
static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
{
	int i;

	spin_lock_init(&adapter->rfs.rfs_ftab_lock);

	/* Initialize EDMA flow hash table */
	for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);

	adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
	adapter->rfs.filter_available = adapter->rfs.max_num_filter;
	adapter->rfs.hashtoclean = 0;

	/* Add timer to get periodic RFS updates from OS */
	timer_setup(&adapter->rfs.expire_rfs, edma_flow_may_expire, 0);
	mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
}
/* edma_free_rfs_flow_table()
 *	Free EDMA RFS flow table
 */
static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
{
	int i;
	int res;

	/* Remove sync timer */
	del_timer_sync(&adapter->rfs.expire_rfs);
	spin_lock_bh(&adapter->rfs.rfs_ftab_lock);

	/* Free EDMA RFS table entries */
	adapter->rfs.filter_available = 0;

	/* Clean-up EDMA flow hash table */
	for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct edma_rfs_filter_node *filter_node;

		hhead = &adapter->rfs.hlist_head[i];
		hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
			res = edma_delete_rfs_filter(adapter, filter_node);
			if (res < 0)
				dev_warn(&adapter->netdev->dev,
					 "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
					 filter_node->flow_id);
			hlist_del(&filter_node->node);
			kfree(filter_node);
		}
	}
	spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
}
/* edma_tx_unmap_and_free()
 *	Unmap and free a transmitted buffer
 */
static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
					  struct edma_sw_desc *sw_desc)
{
	struct sk_buff *skb = sw_desc->skb;

	if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
		   (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
		/* unmap_single for skb head area */
		dma_unmap_single(&pdev->dev, sw_desc->dma,
				 sw_desc->length, DMA_TO_DEVICE);
	else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
		/* unmap page for paged fragments */
		dma_unmap_page(&pdev->dev, sw_desc->dma,
			       sw_desc->length, DMA_TO_DEVICE);

	if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
		dev_kfree_skb_any(skb);

	sw_desc->flags = 0;
}
/* edma_tx_complete()
 *	Used to clean tx queues and update hardware and consumer index
 */
static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
{
	struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
	struct edma_sw_desc *sw_desc;
	struct platform_device *pdev = edma_cinfo->pdev;
	int i;
	u32 data = 0;
	u16 sw_next_to_clean = etdr->sw_next_to_clean;
	u16 hw_next_to_clean;

	edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
	hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;

	/* clean the buffer here */
	while (sw_next_to_clean != hw_next_to_clean) {
		sw_desc = &etdr->sw_desc[sw_next_to_clean];
		edma_tx_unmap_and_free(pdev, sw_desc);
		sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
	}

	etdr->sw_next_to_clean = sw_next_to_clean;

	/* update the TPD consumer index register */
	edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);

	/* Wake the queue if queue is stopped and netdev link is up */
	for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i]; i++) {
		if (netif_tx_queue_stopped(etdr->nq[i])) {
			if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
				netif_tx_wake_queue(etdr->nq[i]);
		}
	}
}
/* edma_get_tx_buffer()
 *	Get sw_desc corresponding to the TPD
 */
static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
					       struct edma_tx_desc *tpd, int queue_id)
{
	struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];

	return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
}
/* edma_get_next_tpd()
 *	Return a TPD descriptor for transfer
 */
static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
					      int queue_id)
{
	struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
	u16 sw_next_to_fill = etdr->sw_next_to_fill;
	struct edma_tx_desc *tpd_desc =
		(&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);

	etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);

	return tpd_desc;
}
/* edma_tpd_available()
 *	Check number of free TPDs
 */
static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
				     int queue_id)
{
	struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
	u16 sw_next_to_fill;
	u16 sw_next_to_clean;
	u16 count = 0;

	sw_next_to_clean = etdr->sw_next_to_clean;
	sw_next_to_fill = etdr->sw_next_to_fill;

	if (likely(sw_next_to_clean <= sw_next_to_fill))
		count = etdr->count;

	return count + sw_next_to_clean - sw_next_to_fill - 1;
}
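/* Note on edma_tpd_available(): the "- 1" reserves one descriptor so that a
 * completely full ring is never reported, keeping the producer index from
 * catching up with the consumer index on a power-of-two ring.
 */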
/* edma_tx_queue_get()
 *	Get the starting number of the queue
 */
static inline int edma_tx_queue_get(struct edma_adapter *adapter,
				    struct sk_buff *skb, int txq_id)
{
	/* skb->priority is used as an index to skb priority table
	 * and based on packet priority, corresponding queue is assigned.
	 */
	return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
}
/* edma_tx_update_hw_idx()
 *	update the producer index for the ring transmitted
 */
static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
				  struct sk_buff *skb, int queue_id)
{
	struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
	u32 tpd_idx_data;

	/* Read and update the producer index */
	edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
	tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
	tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
		<< EDMA_TPD_PROD_IDX_SHIFT;

	edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
}
/* edma_rollback_tx()
 *	Function to retrieve tx resources in case of error
 */
static void edma_rollback_tx(struct edma_adapter *adapter,
			     struct edma_tx_desc *start_tpd, int queue_id)
{
	struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
	struct edma_sw_desc *sw_desc;
	struct edma_tx_desc *tpd = NULL;
	u16 start_index, index;

	start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);

	index = start_index;
	while (index != etdr->sw_next_to_fill) {
		tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
		sw_desc = &etdr->sw_desc[index];
		edma_tx_unmap_and_free(adapter->pdev, sw_desc);
		memset(tpd, 0, sizeof(struct edma_tx_desc));
		if (++index == etdr->count)
			index = 0;
	}
	etdr->sw_next_to_fill = start_index;
}
/* edma_tx_map_and_fill()
 *	gets called from edma_xmit_frame
 *
 * This is where the dma of the buffer to be transmitted
 * gets mapped
 */
static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
				struct edma_adapter *adapter, struct sk_buff *skb, int queue_id,
				unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap,
				bool packet_is_rstp, int nr_frags)
{
	struct edma_sw_desc *sw_desc = NULL;
	struct platform_device *pdev = edma_cinfo->pdev;
	struct edma_tx_desc *tpd = NULL, *start_tpd = NULL;
	struct sk_buff *iter_skb;
	int i = 0;
	u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
	u16 buf_len, lso_desc_len = 0;

	/* It should either be a nr_frags skb or fraglist skb but not both */
	BUG_ON(nr_frags && skb_has_frag_list(skb));

	if (skb_is_gso(skb)) {
		/* TODO: What additional checks need to be performed here */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			lso_word1 |= EDMA_TPD_IPV4_EN;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
								 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			lso_word1 |= EDMA_TPD_LSO_V2_EN;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							       &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}

		lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
			     (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
	} else if (flags_transmit & EDMA_HW_CHECKSUM) {
		u8 css, cso;

		cso = skb_checksum_start_offset(skb);
		css = cso + skb->csum_offset;

		word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
		word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
		word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_PPP_SES))
		word1 |= EDMA_TPD_PPPOE_EN;

	if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
		switch (skb->vlan_proto) {
		case htons(ETH_P_8021Q):
			word3 |= (1 << EDMA_TX_INS_CVLAN);
			word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
			break;
		case htons(ETH_P_8021AD):
			word1 |= (1 << EDMA_TX_INS_SVLAN);
			svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
			break;
		default:
			dev_err(&pdev->dev, "no ctag or stag present\n");
			goto vlan_tag_error;
		}
	} else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
		word3 |= (1 << EDMA_TX_INS_CVLAN);
		word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
	}

	if (packet_is_rstp) {
		word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
		word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
	} else {
		word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
	}

	buf_len = skb_headlen(skb);

	if (lso_word1 & EDMA_TPD_LSO_V2_EN) {

		/* IPv6 LSOv2 descriptor */
		start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
		sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
		sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;

		/* LSOv2 descriptor overrides addr field to pass length */
		tpd->addr = cpu_to_le16(skb->len);
		tpd->svlan_tag = svlan_tag;
		tpd->word1 = word1 | lso_word1;
		tpd->word3 = word3;

		tpd = edma_get_next_tpd(edma_cinfo, queue_id);
		sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);

		/* The last buffer info contain the skb address,
		 * so skb will be freed after unmap
		 */
		sw_desc->length = lso_desc_len;
		sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;

		sw_desc->dma = dma_map_single(&adapter->pdev->dev,
					      skb->data, buf_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, sw_desc->dma))
			goto dma_error;

		tpd->addr = cpu_to_le32(sw_desc->dma);
		tpd->len = cpu_to_le16(buf_len);

		tpd->svlan_tag = svlan_tag;
		tpd->word1 = word1 | lso_word1;
		tpd->word3 = word3;

		/* The last buffer info contain the skb address,
		 * so it will be freed after unmap
		 */
		sw_desc->length = lso_desc_len;
		sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;

		buf_len = 0;
	}

	if (likely(buf_len)) {

		/* TODO Do not dequeue descriptor if there is a potential error */
		tpd = edma_get_next_tpd(edma_cinfo, queue_id);

		if (!start_tpd)
			start_tpd = tpd;

		sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);

		/* The last buffer info contain the skb address,
		 * so it will be freed after unmap
		 */
		sw_desc->length = buf_len;
		sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
		sw_desc->dma = dma_map_single(&adapter->pdev->dev,
					      skb->data, buf_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, sw_desc->dma))
			goto dma_error;

		tpd->addr = cpu_to_le32(sw_desc->dma);
		tpd->len = cpu_to_le16(buf_len);

		tpd->svlan_tag = svlan_tag;
		tpd->word1 = word1 | lso_word1;
		tpd->word3 = word3;
	}

	/* Walk through all paged fragments */
	while (nr_frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		buf_len = skb_frag_size(frag);
		tpd = edma_get_next_tpd(edma_cinfo, queue_id);
		sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
		sw_desc->length = buf_len;
		sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;

		sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, sw_desc->dma))
			goto dma_error;

		tpd->addr = cpu_to_le32(sw_desc->dma);
		tpd->len = cpu_to_le16(buf_len);

		tpd->svlan_tag = svlan_tag;
		tpd->word1 = word1 | lso_word1;
		tpd->word3 = word3;
		i++;
	}

	/* Walk through all fraglist skbs */
	skb_walk_frags(skb, iter_skb) {
		buf_len = iter_skb->len;
		tpd = edma_get_next_tpd(edma_cinfo, queue_id);
		sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
		sw_desc->length = buf_len;
		sw_desc->dma = dma_map_single(&adapter->pdev->dev,
					      iter_skb->data, buf_len, DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, sw_desc->dma))
			goto dma_error;

		tpd->addr = cpu_to_le32(sw_desc->dma);
		tpd->len = cpu_to_le16(buf_len);
		tpd->svlan_tag = svlan_tag;
		tpd->word1 = word1 | lso_word1;
		tpd->word3 = word3;
		sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
	}

	/* Mark end-of-packet on the last TPD and remember which sw_desc owns the skb */
	tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
	sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;

	return 0;

dma_error:
	edma_rollback_tx(adapter, start_tpd, queue_id);
	dev_err(&pdev->dev, "TX DMA map failed\n");
vlan_tag_error:
	return -ENOMEM;
}
/* edma_check_link()
 *	check PHY link status
 */
static int edma_check_link(struct edma_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (!(adapter->poll_required))
		return __EDMA_LINKUP;

	if (phydev->link)
		return __EDMA_LINKUP;

	return __EDMA_LINKDOWN;
}
/* edma_adjust_link()
 *	check for edma link status
 */
void edma_adjust_link(struct net_device *netdev)
{
	int status;
	struct edma_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = adapter->phydev;

	if (!test_bit(__EDMA_UP, &adapter->state_flags))
		return;

	status = edma_check_link(adapter);

	if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
		dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
		adapter->link_state = __EDMA_LINKUP;
		if (adapter->edma_cinfo->is_single_phy) {
			ess_set_port_status_speed(adapter->edma_cinfo, phydev,
						  ffs(adapter->dp_bitmap) - 1);
		}
		netif_carrier_on(netdev);
		if (netif_running(netdev))
			netif_tx_wake_all_queues(netdev);
	} else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
		dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
		adapter->link_state = __EDMA_LINKDOWN;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
	}
}
/* edma_get_stats()
 *	Statistics api used to retrieve the tx/rx statistics
 */
struct net_device_stats *edma_get_stats(struct net_device *netdev)
{
	struct edma_adapter *adapter = netdev_priv(netdev);

	return &adapter->stats;
}
/* edma_xmit()
 *	Main api to be called by the core for packet transmission
 */
netdev_tx_t edma_xmit(struct sk_buff *skb,
		      struct net_device *net_dev)
{
	struct edma_adapter *adapter = netdev_priv(net_dev);
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	struct edma_tx_desc_ring *etdr;
	u16 from_cpu = 0, dp_bitmap = 0, txq_id;
	int ret, nr_frags = 0, num_tpds_needed = 1, queue_id;
	unsigned int flags_transmit = 0;
	bool packet_is_rstp = false;
	struct netdev_queue *nq = NULL;

	if (skb_shinfo(skb)->nr_frags) {
		nr_frags = skb_shinfo(skb)->nr_frags;
		num_tpds_needed += nr_frags;
	} else if (skb_has_frag_list(skb)) {
		struct sk_buff *iter_skb;

		skb_walk_frags(skb, iter_skb)
			num_tpds_needed++;
	}

	if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) {
		dev_err(&net_dev->dev,
			"skb received with fragments %d which is more than %lu",
			num_tpds_needed, EDMA_MAX_SKB_FRAGS);
		dev_kfree_skb_any(skb);
		adapter->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	if (edma_stp_rstp) {
		u16 ath_hdr, ath_eth_type;
		u8 mac_addr[EDMA_ETH_HDR_LEN];

		ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
		if (ath_eth_type == edma_ath_eth_type) {
			packet_is_rstp = true;
			ath_hdr = htons(*(uint16_t *)&skb->data[14]);
			dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
			from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
			memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);

			/* strip the 4-byte atheros header and restore the MAC addresses */
			skb_pull(skb, 4);

			memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
		}
	}

	/* this will be one of the 4 TX queues exposed to linux kernel */
	txq_id = skb_get_queue_mapping(skb);
	queue_id = edma_tx_queue_get(adapter, skb, txq_id);
	etdr = edma_cinfo->tpd_ring[queue_id];
	nq = netdev_get_tx_queue(net_dev, txq_id);

	local_bh_disable();
	/* Tx is not handled in bottom half context. Hence, we need to protect
	 * Tx from tasks and bottom half
	 */

	if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
		/* not enough descriptors, just stop queue */
		netif_tx_stop_queue(nq);
		local_bh_enable();
		dev_dbg(&net_dev->dev, "Not enough descriptors available");
		edma_cinfo->edma_ethstats.tx_desc_error++;
		return NETDEV_TX_BUSY;
	}

	/* Check and mark VLAN tag offload */
	if (!adapter->edma_cinfo->is_single_phy) {
		if (unlikely(skb_vlan_tag_present(skb)))
			flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
		else if (adapter->default_vlan_tag)
			flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
	}

	/* Check and mark checksum offload */
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		flags_transmit |= EDMA_HW_CHECKSUM;

	/* Map and fill descriptor for Tx */
	ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
				   flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags);
	if (ret) {
		dev_kfree_skb_any(skb);
		adapter->stats.tx_errors++;
	} else {
		/* Update SW producer index */
		edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);

		/* update tx statistics */
		adapter->stats.tx_packets++;
		adapter->stats.tx_bytes += skb->len;
	}

	local_bh_enable();
	return NETDEV_TX_OK;
}
/* edma_flow_may_expire()
 *	Timer function called periodically to delete the node
 */
void edma_flow_may_expire(struct timer_list *t)
{
	struct edma_rfs_flow_table *table = from_timer(table, t, expire_rfs);
	struct edma_adapter *adapter =
		container_of(table, typeof(*adapter), rfs);
	int j;

	spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
	for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct edma_rfs_filter_node *n;
		bool res;

		hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			res = rps_may_expire_flow(adapter->netdev, n->rq_id,
						  n->flow_id, n->filter_id);
			if (res) {
				int ret;

				ret = edma_delete_rfs_filter(adapter, n);
				if (ret < 0) {
					dev_dbg(&adapter->netdev->dev,
						"RFS entry %d not allowed to be flushed by Switch",
						n->flow_id);
				} else {
					hlist_del(&n->node);
					kfree(n);
					adapter->rfs.filter_available++;
				}
			}
		}
	}

	adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
	spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
	mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
}
/* edma_rx_flow_steer()
 *	Called by core to steer the flow to CPU
 */
int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq, u32 flow_id)
{
	struct flow_keys keys;
	struct edma_rfs_filter_node *filter_node;
	struct edma_adapter *adapter = netdev_priv(dev);
	u16 hash_tblid;
	int res;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		dev_err(&adapter->pdev->dev, "IPv6 not supported\n");
		res = -EINVAL;
		goto no_protocol_err;
	}

	/* Dissect flow parameters
	 * We only support IPv4 + TCP/UDP
	 */
	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
	if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
		res = -EPROTONOSUPPORT;
		goto no_protocol_err;
	}

	/* Check if table entry exists */
	hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;

	spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
	filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);

	if (filter_node) {
		if (rxq == filter_node->rq_id) {
			res = -EEXIST;
			goto out;
		}

		res = edma_delete_rfs_filter(adapter, filter_node);
		if (res < 0) {
			dev_warn(&adapter->netdev->dev,
				 "Cannot steer flow %d to different queue",
				 filter_node->flow_id);
			goto out;
		}

		adapter->rfs.filter_available++;
		res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
		if (res < 0) {
			dev_warn(&adapter->netdev->dev,
				 "Cannot steer flow %d to different queue",
				 filter_node->flow_id);
			goto out;
		}

		adapter->rfs.filter_available--;
		filter_node->rq_id = rxq;
		filter_node->filter_id = res;
	} else {
		if (adapter->rfs.filter_available == 0) {
			res = -EBUSY;
			goto out;
		}

		filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
		if (!filter_node) {
			res = -ENOMEM;
			goto out;
		}

		res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
		if (res < 0) {
			kfree(filter_node);
			goto out;
		}

		adapter->rfs.filter_available--;
		filter_node->rq_id = rxq;
		filter_node->filter_id = res;
		filter_node->flow_id = flow_id;
		filter_node->keys = keys;
		INIT_HLIST_NODE(&filter_node->node);
		hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
	}

out:
	spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
no_protocol_err:
	return res;
}
/* edma_register_rfs_filter()
 *	Add RFS filter callback
 */
int edma_register_rfs_filter(struct net_device *netdev,
			     set_rfs_filter_callback_t set_filter)
{
	struct edma_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->rfs.rfs_ftab_lock);

	if (adapter->set_rfs_rule) {
		spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
		return -1;
	}

	adapter->set_rfs_rule = set_filter;
	spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);

	return 0;
}
/* edma_alloc_tx_rings()
 *	Allocate tx rings
 */
int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
{
	struct platform_device *pdev = edma_cinfo->pdev;
	int i, err = 0;

	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
		if (err) {
			dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
			return err;
		}
	}

	return 0;
}
/* edma_free_tx_rings()
 *	Free tx rings
 */
void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
{
	int i;

	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
}
/* edma_free_tx_resources()
 *	Free buffers associated with tx rings
 */
void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
{
	struct edma_tx_desc_ring *etdr;
	struct edma_sw_desc *sw_desc;
	struct platform_device *pdev = edma_cinfo->pdev;
	int i, j;

	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		etdr = edma_cinfo->tpd_ring[i];
		for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
			sw_desc = &etdr->sw_desc[j];
			if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
					      EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
				edma_tx_unmap_and_free(pdev, sw_desc);
		}
	}
}
/* edma_alloc_rx_rings()
 *	Allocate rx rings
 */
int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
{
	struct platform_device *pdev = edma_cinfo->pdev;
	int i, j, err = 0;

	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
		err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
		if (err) {
			dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
			return err;
		}
		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
	}

	return 0;
}
/* edma_free_rx_rings()
 *	Free rx rings
 */
void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
{
	int i, j;

	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
		edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
	}
}
/* edma_free_queues()
 *	Free the queues allocated
 */
void edma_free_queues(struct edma_common_info *edma_cinfo)
{
	int i, j;

	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		if (edma_cinfo->tpd_ring[i])
			kfree(edma_cinfo->tpd_ring[i]);
		edma_cinfo->tpd_ring[i] = NULL;
	}

	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
		if (edma_cinfo->rfd_ring[j])
			kfree(edma_cinfo->rfd_ring[j]);
		edma_cinfo->rfd_ring[j] = NULL;
		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
	}

	edma_cinfo->num_rx_queues = 0;
	edma_cinfo->num_tx_queues = 0;
}
/* edma_free_rx_resources()
 *	Free buffers associated with rx rings
 */
void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
{
	struct edma_rfd_desc_ring *erdr;
	struct edma_sw_desc *sw_desc;
	struct platform_device *pdev = edma_cinfo->pdev;
	int i, j, k;

	for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
		erdr = edma_cinfo->rfd_ring[k];
		for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
			sw_desc = &erdr->sw_desc[j];
			if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
				dma_unmap_single(&pdev->dev, sw_desc->dma,
						 sw_desc->length, DMA_FROM_DEVICE);
				edma_clean_rfd(erdr, j);
			} else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
				dma_unmap_page(&pdev->dev, sw_desc->dma,
					       sw_desc->length, DMA_FROM_DEVICE);
				edma_clean_rfd(erdr, j);
			}
		}
		k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
	}
}
/* edma_alloc_queues_tx()
 *	Allocate memory for all rings
 */
int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
{
	int i;

	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		struct edma_tx_desc_ring *etdr;

		etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
		if (!etdr)
			goto err;
		etdr->count = edma_cinfo->tx_ring_count;
		edma_cinfo->tpd_ring[i] = etdr;
	}

	return 0;
err:
	edma_free_queues(edma_cinfo);
	return -ENOMEM;
}
/* edma_alloc_queues_rx()
 *	Allocate memory for all rings
 */
int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
{
	int i, j;

	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
		struct edma_rfd_desc_ring *rfd_ring;

		rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
				   GFP_KERNEL);
		if (!rfd_ring)
			goto err;
		rfd_ring->count = edma_cinfo->rx_ring_count;
		edma_cinfo->rfd_ring[j] = rfd_ring;
		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
	}

	return 0;
err:
	edma_free_queues(edma_cinfo);
	return -ENOMEM;
}
/* edma_clear_irq_status()
 *	Clear interrupt status
 */
void edma_clear_irq_status(void)
{
	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
	edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
	edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
}
/* edma_configure()
 *	Configure skb, edma interrupts and control register.
 */
int edma_configure(struct edma_common_info *edma_cinfo)
{
	struct edma_hw *hw = &edma_cinfo->hw;
	u32 intr_modrt_data;
	u32 intr_ctrl_data = 0;
	int i, j, ret_count;

	edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
	intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
	intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
	edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);

	edma_clear_irq_status();

	/* Clear any WOL status */
	edma_write_reg(EDMA_REG_WOL_CTRL, 0);
	intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
	intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
	edma_configure_tx(edma_cinfo);
	edma_configure_rx(edma_cinfo);

	/* Allocate the RX buffer */
	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
		struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];

		ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
		if (ret_count)
			dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
	}

	/* Configure descriptor Ring */
	edma_init_desc(edma_cinfo);

	return 0;
}
/* edma_irq_enable()
 *	Enable default interrupt generation settings
 */
void edma_irq_enable(struct edma_common_info *edma_cinfo)
{
	struct edma_hw *hw = &edma_cinfo->hw;
	int i, j;

	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
		edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
	}
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
}
/* edma_irq_disable()
 *	Disable interrupt generation
 */
void edma_irq_disable(struct edma_common_info *edma_cinfo)
{
	int i;

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
		edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
		edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
	edma_write_reg(EDMA_REG_MISC_IMR, 0);
	edma_write_reg(EDMA_REG_WOL_IMR, 0);
}
/* edma_free_irqs()
 *	Free all IRQs requested for the per-CPU queues
 */
void edma_free_irqs(struct edma_adapter *adapter)
{
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	int i, j;
	int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);

	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
			free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);

		for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
			free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
	}
}
/* edma_enable_rx_ctrl()
 *	Enable RX queue control
 */
void edma_enable_rx_ctrl(struct edma_hw *hw)
{
	u32 data;

	edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
	data |= EDMA_RXQ_CTRL_EN;
	edma_write_reg(EDMA_REG_RXQ_CTRL, data);
}
/* edma_enable_tx_ctrl()
 *	Enable TX queue control
 */
void edma_enable_tx_ctrl(struct edma_hw *hw)
{
	u32 data;

	edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
	data |= EDMA_TXQ_CTRL_TXQ_EN;
	edma_write_reg(EDMA_REG_TXQ_CTRL, data);
}
/* edma_stop_rx_tx()
 *	Disable RX/TX Queue control
 */
void edma_stop_rx_tx(struct edma_hw *hw)
{
	u32 data;

	edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
	data &= ~EDMA_RXQ_CTRL_EN;
	edma_write_reg(EDMA_REG_RXQ_CTRL, data);
	edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
	data &= ~EDMA_TXQ_CTRL_TXQ_EN;
	edma_write_reg(EDMA_REG_TXQ_CTRL, data);
}
/* edma_reset()
 *	Disable interrupts, clear pending status and stop both queue controls
 */
int edma_reset(struct edma_common_info *edma_cinfo)
{
	struct edma_hw *hw = &edma_cinfo->hw;

	edma_irq_disable(edma_cinfo);

	edma_clear_irq_status();

	edma_stop_rx_tx(hw);

	return 0;
}
/* edma_fill_netdev()
 *	Fill netdev for each etdr
 */
int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
		     int dev, int txq_id)
{
	struct edma_tx_desc_ring *etdr;
	int i = 0;

	etdr = edma_cinfo->tpd_ring[queue_id];

	while (etdr->netdev[i])
		i++;

	if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
		return -1;

	/* Populate the netdev associated with the tpd ring */
	etdr->netdev[i] = edma_netdev[dev];
	etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);

	return 0;
}
/* edma_set_mac_addr()
 *	Change the Ethernet Address of the NIC
 */
int edma_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	if (netif_running(netdev))
		return -EBUSY;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}
/* edma_set_stp_rstp()
 *	Enable or disable STP/RSTP handling
 */
void edma_set_stp_rstp(bool rstp)
{
	edma_stp_rstp = rstp;
}
/* edma_assign_ath_hdr_type()
 *	assign atheros header eth type
 */
void edma_assign_ath_hdr_type(int eth_type)
{
	edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
}
/* edma_get_default_vlan_tag()
 *	Used by other modules to get the default vlan tag
 */
int edma_get_default_vlan_tag(struct net_device *netdev)
{
	struct edma_adapter *adapter = netdev_priv(netdev);

	if (adapter->default_vlan_tag)
		return adapter->default_vlan_tag;

	return 0;
}
/* edma_open()
 *	gets called when netdevice is up, start the queue.
 */
int edma_open(struct net_device *netdev)
{
	struct edma_adapter *adapter = netdev_priv(netdev);
	struct platform_device *pdev = adapter->edma_cinfo->pdev;

	netif_tx_start_all_queues(netdev);
	edma_initialise_rfs_flow_table(adapter);
	set_bit(__EDMA_UP, &adapter->state_flags);

	/* if Link polling is enabled, in our case enabled for WAN, then
	 * do a phy start, else always set link as UP
	 */
	if (adapter->poll_required) {
		if (!IS_ERR(adapter->phydev)) {
			phy_start(adapter->phydev);
			phy_start_aneg(adapter->phydev);
			adapter->link_state = __EDMA_LINKDOWN;
		} else {
			dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
		}
	} else {
		adapter->link_state = __EDMA_LINKUP;
		netif_carrier_on(netdev);
	}

	return 0;
}
/* edma_close()
 *	gets called when netdevice is down, stops the queue.
 */
int edma_close(struct net_device *netdev)
{
	struct edma_adapter *adapter = netdev_priv(netdev);

	edma_free_rfs_flow_table(adapter);
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	if (adapter->poll_required) {
		if (!IS_ERR(adapter->phydev))
			phy_stop(adapter->phydev);
	}

	adapter->link_state = __EDMA_LINKDOWN;

	/* Set GMAC state to UP before link state is checked
	 */
	clear_bit(__EDMA_UP, &adapter->state_flags);

	return 0;
}
/* edma_poll()
 *	polling function that gets called when the napi gets scheduled.
 *
 * Main sequence of task performed in this api
 * is clear irq status -> clear_tx_irq -> clean_rx_irq ->
 * enable interrupts.
 */
int edma_poll(struct napi_struct *napi, int budget)
{
	struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
		struct edma_per_cpu_queues_info, napi);
	struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
	u32 reg_data;
	u32 shadow_rx_status, shadow_tx_status;
	int queue_id;
	int i, work_done = 0;
	u16 rx_pending_fill;

	/* Store the Rx/Tx status by ANDing it with
	 * appropriate CPU RX/TX mask
	 */
	edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
	edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
	shadow_rx_status = edma_percpu_info->rx_status;
	edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
	edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
	shadow_tx_status = edma_percpu_info->tx_status;

	/* Every core will have a start, which will be computed
	 * in probe and stored in edma_percpu_info->tx_start variable.
	 * We will shift the status bit by tx_start to obtain
	 * status bits for the core on which the current processing
	 * is happening. Since, there are 4 tx queues per core,
	 * we will run the loop till we get the correct queue to clear.
	 */
	while (edma_percpu_info->tx_status) {
		queue_id = ffs(edma_percpu_info->tx_status) - 1;
		edma_tx_complete(edma_cinfo, queue_id);
		edma_percpu_info->tx_status &= ~(1 << queue_id);
	}

	/* Every core will have a start, which will be computed
	 * in probe and stored in edma_percpu_info->rx_start variable.
	 * We will shift the status bit by rx_start to obtain
	 * status bits for the core on which the current processing
	 * is happening. Since there are 2 rx queues per core, we
	 * will run the loop till we get the correct queue to clear.
	 */
	while (edma_percpu_info->rx_status) {
		queue_id = ffs(edma_percpu_info->rx_status) - 1;
		rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
						   budget, queue_id, napi);

		if (likely(work_done < budget)) {
			if (rx_pending_fill) {
				/* reschedule poll() to refill rx buffer deficit */
				work_done = budget;
				break;
			}
			edma_percpu_info->rx_status &= ~(1 << queue_id);
		} else {
			break;
		}
	}

	/* Clear the status register, to avoid the interrupts to
	 * reoccur. This clearing of interrupt status register is
	 * done here as writing to status register only takes place
	 * once the producer/consumer index has been updated to
	 * reflect that the packet transmission/reception went fine.
	 */
	edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
	edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);

	/* If budget not fully consumed, exit the polling mode */
	if (likely(work_done < budget)) {
		napi_complete(napi);

		/* re-enable the interrupts */
		for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
			edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
		for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
			edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
	}

	return work_done;
}
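/* Note on interrupt handling: edma_interrupt() below writes 0x0 to the
 * per-queue RX/TX interrupt mask registers to silence the source and then
 * schedules NAPI; edma_poll() re-arms them with 0x1 only once the budget was
 * not exhausted, giving the usual interrupt/poll ping-pong.
 */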
/* edma_interrupt()
 *	interrupt handler
 */
irqreturn_t edma_interrupt(int irq, void *dev)
{
	struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *)dev;
	struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
	int i;

	/* Mask the TX/RX interrupts until the NAPI poll has run */
	for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
		edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);

	for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
		edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);

	napi_schedule(&edma_percpu_info->napi);

	return IRQ_HANDLED;
}