1/*
2 * Copyright (c) 2014 - 2018, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16#include <linux/platform_device.h>
17#include <linux/if_vlan.h>
18#include <linux/kernel.h>
19#include "ess_edma.h"
20#include "edma.h"
21
22extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
23extern u32 edma_disable_queue_stop;
24
25bool edma_stp_rstp;
26u16 edma_ath_eth_type;
27extern u8 edma_dscp2ac_tbl[EDMA_PRECEDENCE_MAX];
28extern u8 edma_per_prec_stats_enable;
29extern u32 edma_iad_stats_enable;
30
31/* edma_skb_priority_offset()
32 * get edma skb priority
33 */
34static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
35{
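/*
 * Bit 2 of skb->priority selects one of the two TPD queues grouped under a
 * Linux Tx queue: priorities 0-3 map to offset 0 and priorities 4-7 to
 * offset 1 (see edma_tx_queue_get()).
 */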
36 return (skb->priority >> 2) & 1;
37}
38
39/* edma_alloc_tx_ring()
40 * Allocate Tx descriptors ring
41 */
42static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
43 struct edma_tx_desc_ring *etdr)
44{
45 struct platform_device *pdev = edma_cinfo->pdev;
46 u16 sw_size = sizeof(struct edma_sw_desc) * etdr->count;
47
48 /* Initialize ring */
49 etdr->size = sizeof(struct edma_tx_desc) * etdr->count;
50 etdr->sw_next_to_fill = 0;
51 etdr->sw_next_to_clean = 0;
52
53 /* Allocate SW descriptors */
54 etdr->sw_desc = vzalloc(sw_size);
55 if (!etdr->sw_desc) {
56 dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
57 return -ENOMEM;
58 }
59
60 /* Allocate HW descriptors */
61 etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
62 GFP_KERNEL);
63 if (!etdr->hw_desc) {
64 dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
65 vfree(etdr->sw_desc);
66 etdr->sw_desc = NULL;
67 return -ENOMEM;
68 }
69
70 return 0;
71}
72
73/* edma_free_tx_ring()
 74 * Free a Tx ring allocated by edma_alloc_tx_ring
75 */
76static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
77 struct edma_tx_desc_ring *etdr)
78{
79 struct platform_device *pdev = edma_cinfo->pdev;
80
81 if (likely(etdr->hw_desc)) {
82 dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
83 etdr->dma);
84
85 vfree(etdr->sw_desc);
86 etdr->sw_desc = NULL;
87 }
88}
89
90/* edma_alloc_rx_ring()
91 * allocate rx descriptor ring
92 */
93static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
94 struct edma_rfd_desc_ring *erxd)
95{
96 struct platform_device *pdev = edma_cinfo->pdev;
97 u16 sw_size = sizeof(struct edma_sw_desc) * erxd->count;
98
99 erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
100 erxd->sw_next_to_fill = 0;
101 erxd->sw_next_to_clean = 0;
102
103 /* Allocate SW descriptors */
104 erxd->sw_desc = vzalloc(sw_size);
105 if (!erxd->sw_desc)
106 return -ENOMEM;
107
108 /* Alloc HW descriptors */
109 erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
110 GFP_KERNEL);
111 if (!erxd->hw_desc) {
112 vfree(erxd->sw_desc);
113 erxd->sw_desc = NULL;
114 return -ENOMEM;
115 }
116
 117 /* Initialize pending fill */
118 erxd->pending_fill = 0;
119
 120 return 0;
121}
122
123/* edma_free_rx_ring()
124 * Free rx ring allocated by alloc_rx_ring
125 */
126static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
127 struct edma_rfd_desc_ring *erxd)
128{
129 struct platform_device *pdev = edma_cinfo->pdev;
130
131 if (likely(erxd->hw_desc)) {
132 dma_free_coherent(&pdev->dev, erxd->size, erxd->hw_desc,
133 erxd->dma);
134
135 vfree(erxd->sw_desc);
136 erxd->sw_desc = NULL;
137 }
138}
139
140/* edma_configure_tx()
141 * Configure transmission control data
142 */
143static void edma_configure_tx(struct edma_common_info *edma_cinfo)
144{
145 u32 txq_ctrl_data;
146
147 txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
148 txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
149 txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
150 edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
151}
152
153/* edma_configure_rx()
154 * configure reception control data
155 */
156static void edma_configure_rx(struct edma_common_info *edma_cinfo)
157{
158 struct edma_hw *hw = &edma_cinfo->hw;
159 u32 rss_type, rx_desc1, rxq_ctrl_data;
160
161 /* Set RSS type */
162 rss_type = hw->rss_type;
163 edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
164
165 /* Set RFD burst number */
166 rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
167
168 /* Set RFD prefetch threshold */
169 rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
170
 171 /* Set RFD in host ring low threshold to generate interrupt */
172 rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
173 edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
174
 175 /* Set Rx FIFO threshold at which to start DMA of data to the host */
176 rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
177
178 /* Set RX remove vlan bit */
179 rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
180
181 edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
182}
183
184/* edma_alloc_rx_buf()
185 * does skb allocation for the received packets.
186 */
187static int edma_alloc_rx_buf(struct edma_common_info
188 *edma_cinfo,
189 struct edma_rfd_desc_ring *erdr,
190 int cleaned_count, int queue_id)
191{
192 struct platform_device *pdev = edma_cinfo->pdev;
193 struct edma_rx_free_desc *rx_desc;
194 struct edma_sw_desc *sw_desc;
195 struct sk_buff *skb;
196 unsigned int i;
197 u16 prod_idx, length;
198 u32 reg_data;
199
200 if (cleaned_count > erdr->count) {
201 dev_err(&pdev->dev, "Incorrect cleaned_count %d",
202 cleaned_count);
203 return -1;
204 }
205
206 i = erdr->sw_next_to_fill;
207
208 while (cleaned_count) {
209 sw_desc = &erdr->sw_desc[i];
210 length = edma_cinfo->rx_head_buffer_len;
211
212 if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
213 skb = sw_desc->skb;
214
215 /* Clear REUSE flag */
216 sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
217 } else {
218 /* alloc skb */
219 skb = netdev_alloc_skb(edma_netdev[0], length);
220 if (!skb) {
221 /* Better luck next round */
222 sw_desc->flags = 0;
223 break;
224 }
225 }
226
227 if (!edma_cinfo->page_mode) {
228 sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
229 length, DMA_FROM_DEVICE);
230 if (dma_mapping_error(&pdev->dev, sw_desc->dma)) {
231 WARN_ONCE(0, "EDMA DMA mapping failed for linear address %x", sw_desc->dma);
232 sw_desc->flags = 0;
233 sw_desc->skb = NULL;
234 dev_kfree_skb_any(skb);
235 break;
236 }
237
238 /*
239 * We should not exit from here with REUSE flag set
240 * This is to avoid re-using same sk_buff for next
241 * time around
242 */
243 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
244 sw_desc->length = length;
245 } else {
246 struct page *pg = alloc_page(GFP_ATOMIC);
247
248 if (!pg) {
249 sw_desc->flags = 0;
250 sw_desc->skb = NULL;
251 dev_kfree_skb_any(skb);
252 break;
253 }
254
255 sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
256 edma_cinfo->rx_page_buffer_len,
257 DMA_FROM_DEVICE);
258 if (dma_mapping_error(&pdev->dev, sw_desc->dma)) {
259 WARN_ONCE(0, "EDMA DMA mapping failed for page address %x", sw_desc->dma);
260 sw_desc->flags = 0;
261 sw_desc->skb = NULL;
262 __free_page(pg);
263 dev_kfree_skb_any(skb);
264 break;
265 }
266
267 skb_fill_page_desc(skb, 0, pg, 0,
268 edma_cinfo->rx_page_buffer_len);
269 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
270 sw_desc->length = edma_cinfo->rx_page_buffer_len;
271 }
272
273 /* Update the buffer info */
274 sw_desc->skb = skb;
275 rx_desc = (&(erdr->hw_desc)[i]);
276 rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
277 if (++i == erdr->count)
278 i = 0;
279 cleaned_count--;
280 }
281
282 erdr->sw_next_to_fill = i;
283
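/* Point the producer index at the last descriptor that was filled,
 * i.e. one position behind sw_next_to_fill.
 */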
284 if (i == 0)
285 prod_idx = erdr->count - 1;
286 else
287 prod_idx = i - 1;
288
289 /* Update the producer index */
290 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
291 reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
292 reg_data |= prod_idx;
293 edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
 294
295 /* If we couldn't allocate all the buffers,
296 * we increment the alloc failure counters
297 */
298 if (cleaned_count)
299 edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;
300
 301 return cleaned_count;
302}
303
304/* edma_init_desc()
305 * update descriptor ring size, buffer and producer/consumer index
306 */
307static void edma_init_desc(struct edma_common_info *edma_cinfo)
308{
309 struct edma_rfd_desc_ring *rfd_ring;
310 struct edma_tx_desc_ring *etdr;
311 int i = 0, j = 0;
312 u32 data = 0;
313 u16 hw_cons_idx = 0;
314
315 /* Set the base address of every TPD ring. */
316 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
317 etdr = edma_cinfo->tpd_ring[i];
318
319 /* Update descriptor ring base address */
320 edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
321 edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
322
323 /* Calculate hardware consumer index */
324 hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
325 etdr->sw_next_to_fill = hw_cons_idx;
326 etdr->sw_next_to_clean = hw_cons_idx;
327 data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
328 data |= hw_cons_idx;
329
330 /* update producer index */
331 edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
332
333 /* update SW consumer index register */
334 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
335
336 /* Set TPD ring size */
337 edma_write_reg(EDMA_REG_TPD_RING_SIZE,
338 edma_cinfo->tx_ring_count &
339 EDMA_TPD_RING_SIZE_MASK);
340 }
341
342 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
343 rfd_ring = edma_cinfo->rfd_ring[j];
344 /* Update Receive Free descriptor ring base address */
345 edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
346 (u32)(rfd_ring->dma));
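/* With four Rx queues only alternate hardware RFD rings (0, 2, 4, 6)
 * are used, so step the ring index by two in that case.
 */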
347 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
348 }
349
350 data = edma_cinfo->rx_head_buffer_len;
351 if (edma_cinfo->page_mode)
352 data = edma_cinfo->rx_page_buffer_len;
353
354 data &= EDMA_RX_BUF_SIZE_MASK;
355 data <<= EDMA_RX_BUF_SIZE_SHIFT;
356
357 /* Update RFD ring size and RX buffer size */
358 data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
359 << EDMA_RFD_RING_SIZE_SHIFT;
360
361 edma_write_reg(EDMA_REG_RX_DESC0, data);
362
363 /* Disable TX FIFO low watermark and high watermark */
364 edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
365
366 /* Load all of base address above */
367 edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
368 data |= 1 << EDMA_LOAD_PTR_SHIFT;
369 edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
370}
371
 372/* edma_receive_checksum()
 373 * API to check the checksum on received packets
374 */
375static void edma_receive_checksum(struct edma_rx_return_desc *rd,
376 struct sk_buff *skb)
377{
378 skb_checksum_none_assert(skb);
379
380 /* check the RRD IP/L4 checksum bit to see if
 381 * it is set, which in turn indicates checksum
382 * failure.
383 */
384 if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
385 return;
386
 387 /*
388 * We disable checksum verification only if
389 * we have a TCP/UDP packet
390 */
391 if (rd->rrd7 & (EDMA_RRD_L4OFFSET_MASK << EDMA_RRD_L4OFFSET_SHIFT))
392 skb->ip_summed = CHECKSUM_UNNECESSARY;
 393}
394
395/* edma_clean_rfd()
 396 * clean up Rx resources on error
397 */
398static void edma_clean_rfd(struct platform_device *pdev,
399 struct edma_rfd_desc_ring *erdr,
400 u16 index,
401 int pos)
402{
403 struct edma_rx_free_desc *rx_desc = &(erdr->hw_desc[index]);
404 struct edma_sw_desc *sw_desc = &erdr->sw_desc[index];
405
406 /* Unmap non-first RFD positions in packet */
407 if (pos) {
408 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
409 dma_unmap_single(&pdev->dev, sw_desc->dma,
410 sw_desc->length, DMA_FROM_DEVICE);
411 else
412 dma_unmap_page(&pdev->dev, sw_desc->dma,
413 sw_desc->length, DMA_FROM_DEVICE);
414 }
415
416 if (sw_desc->skb) {
417 dev_kfree_skb_any(sw_desc->skb);
418 sw_desc->skb = NULL;
419 }
420
421 sw_desc->flags = 0;
422 memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
423}
424
425/* edma_rx_complete_stp_rstp()
426 * Complete Rx processing for STP RSTP packets
427 */
428static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
429{
430 int i;
431 u32 priority;
432 u16 port_type;
433 u8 mac_addr[EDMA_ETH_HDR_LEN];
434
435 port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
436 & EDMA_RRD_PORT_TYPE_MASK;
 437 /* Only if the port type is 0x4 do we proceed with
 438 * the STP/RSTP handling below
 439 */
440 if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
441 u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
442
443 /* calculate the frame priority */
444 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
445 & EDMA_RRD_PRIORITY_MASK;
446
447 for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
448 mac_addr[i] = skb->data[i];
449
450 /* Check if destination mac addr is bpdu addr */
451 if (!memcmp(mac_addr, bpdu_mac, 6)) {
 452 /* The destination MAC address is the BPDU
 453 * address, so prepend the Atheros header
 454 * to the packet.
 455 */
456 u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
457 (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
458 (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
459 skb_push(skb, 4);
460 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
461 *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
462 *(uint16_t *)&skb->data[14] = htons(athr_hdr);
463 }
464 }
465}
466
467/* edma_rx_complete_fraglist()
468 * Complete Rx processing for fraglist skbs
469 */
470static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
471 struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
472{
473 struct platform_device *pdev = edma_cinfo->pdev;
474 struct edma_hw *hw = &edma_cinfo->hw;
475 struct sk_buff *skb_temp;
476 struct edma_sw_desc *sw_desc;
477 int i;
478 u16 size_remaining;
479
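/* The first 16 bytes of the head buffer hold the RRD, so they are
 * excluded from the payload length accounted below.
 */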
480 skb->data_len = 0;
481 skb->tail += (hw->rx_head_buff_size - 16);
482 skb->len = skb->truesize = length;
483 size_remaining = length - (hw->rx_head_buff_size - 16);
484
485 /* clean-up all related sw_descs */
486 for (i = 1; i < num_rfds; i++) {
487 struct sk_buff *skb_prev;
488
489 sw_desc = &erdr->sw_desc[sw_next_to_clean];
490 skb_temp = sw_desc->skb;
491
492 dma_unmap_single(&pdev->dev, sw_desc->dma,
493 sw_desc->length, DMA_FROM_DEVICE);
494
495 if (size_remaining < hw->rx_head_buff_size)
496 skb_put(skb_temp, size_remaining);
497 else
498 skb_put(skb_temp, hw->rx_head_buff_size);
499
500 /* If we are processing the first rfd, we link
501 * skb->frag_list to the skb corresponding to the
502 * first RFD
503 */
504 if (i == 1)
505 skb_shinfo(skb)->frag_list = skb_temp;
506 else
507 skb_prev->next = skb_temp;
508 skb_prev = skb_temp;
509 skb_temp->next = NULL;
510
511 skb->data_len += skb_temp->len;
512 size_remaining -= skb_temp->len;
513
514 /* Increment SW index */
515 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
516 }
517
518 return sw_next_to_clean;
519}
520
521/* edma_rx_complete_paged()
522 * Complete Rx processing for paged skbs
523 */
524static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds,
525 u16 length, u32 sw_next_to_clean,
526 struct edma_rfd_desc_ring *erdr,
527 struct edma_common_info *edma_cinfo)
528{
529 struct platform_device *pdev = edma_cinfo->pdev;
530 struct sk_buff *skb_temp;
531 struct edma_sw_desc *sw_desc;
532 int i;
533 u16 size_remaining;
534
535 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
536
537 /* Setup skbuff fields */
538 skb->len = length;
539
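/* The RRD occupies the first 16 bytes of the first page, so the page is
 * attached to the skb starting at offset 16.
 */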
540 if (likely(num_rfds <= 1)) {
541 skb->data_len = length;
542 skb->truesize += edma_cinfo->rx_page_buffer_len;
543 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
544 16, length);
545 } else {
546 frag->size -= 16;
547 skb->data_len = frag->size;
548 skb->truesize += edma_cinfo->rx_page_buffer_len;
549 size_remaining = length - frag->size;
550
551 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
552 16, frag->size);
553
554 /* clean-up all related sw_descs */
555 for (i = 1; i < num_rfds; i++) {
556 sw_desc = &erdr->sw_desc[sw_next_to_clean];
557 skb_temp = sw_desc->skb;
558 frag = &skb_shinfo(skb_temp)->frags[0];
559 dma_unmap_page(&pdev->dev, sw_desc->dma,
560 sw_desc->length, DMA_FROM_DEVICE);
561
562 if (size_remaining < edma_cinfo->rx_page_buffer_len)
563 frag->size = size_remaining;
564
565 skb_fill_page_desc(skb, i, skb_frag_page(frag),
566 0, frag->size);
567
568 /* We used frag pages from skb_temp in skb */
569 skb_shinfo(skb_temp)->nr_frags = 0;
570 dev_kfree_skb_any(skb_temp);
571
572 skb->data_len += frag->size;
573 skb->truesize += edma_cinfo->rx_page_buffer_len;
574 size_remaining -= frag->size;
575
576 /* Increment SW index */
577 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
578 }
579 }
580
581 return sw_next_to_clean;
582}
583
584/*
585 * edma_rx_complete()
 586 * Main API called from the poll function to process rx packets.
587 */
 588static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
 589 int *work_done, int work_to_do, int queue_id,
590 struct napi_struct *napi)
591{
592 struct platform_device *pdev = edma_cinfo->pdev;
593 struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
594 u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
595 sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
596 u32 data = 0;
597 u16 count = erdr->count, rfd_avail;
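/* Collapses the eight hardware Rx queues onto the four Rx queue ids
 * exposed to the stack when recording the Rx queue for RPS/RFS.
 */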
598 u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
599
 600 cleaned_count = erdr->pending_fill;
 601 sw_next_to_clean = erdr->sw_next_to_clean;
602
603 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
604 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
605 EDMA_RFD_CONS_IDX_MASK;
606
607 do {
608 while (sw_next_to_clean != hw_next_to_clean) {
609 struct net_device *netdev;
610 struct edma_adapter *adapter;
611 struct edma_sw_desc *sw_desc;
612 struct sk_buff *skb;
613 struct edma_rx_return_desc *rd;
614 u8 *vaddr;
615 int port_id, i, drop_count = 0;
616 u32 priority;
617
618 if (!work_to_do)
619 break;
620
621 sw_desc = &erdr->sw_desc[sw_next_to_clean];
622 skb = sw_desc->skb;
623
624 /* Get RRD */
625 if (!edma_cinfo->page_mode) {
626 dma_unmap_single(&pdev->dev, sw_desc->dma,
627 sw_desc->length, DMA_FROM_DEVICE);
628 rd = (struct edma_rx_return_desc *)skb->data;
629
630 } else {
631 dma_unmap_page(&pdev->dev, sw_desc->dma,
632 sw_desc->length, DMA_FROM_DEVICE);
633 vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
634 memcpy((uint8_t *)&rrd[0], vaddr, 16);
635 rd = (struct edma_rx_return_desc *)rrd;
636 kunmap_atomic(vaddr);
637 }
638
639 /* Check if RRD is valid */
640 if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
641 dev_err(&pdev->dev, "Incorrect RRD DESC valid bit set");
642 edma_clean_rfd(pdev, erdr, sw_next_to_clean, 0);
643 sw_next_to_clean = (sw_next_to_clean + 1) &
644 (erdr->count - 1);
645 cleaned_count++;
646 continue;
647 }
648
649 /* Get the number of RFDs from RRD */
650 num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
651
652 /* Get Rx port ID from switch */
653 port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
654 if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
655 if (net_ratelimit()) {
656 dev_err(&pdev->dev, "Incorrect RRD source port bit set");
657 dev_err(&pdev->dev,
658 "RRD Dump\n rrd0:%x rrd1: %x rrd2: %x rrd3: %x rrd4: %x rrd5: %x rrd6: %x rrd7: %x",
659 rd->rrd0, rd->rrd1, rd->rrd2, rd->rrd3, rd->rrd4, rd->rrd5, rd->rrd6, rd->rrd7);
660 dev_err(&pdev->dev, "Num_rfds: %d, src_port: %d, pkt_size: %d, cvlan_tag: %d\n",
661 num_rfds, rd->rrd1 & EDMA_RRD_SRC_PORT_NUM_MASK,
662 rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK, rd->rrd7 & EDMA_RRD_CVLAN);
663 }
664 for (i = 0; i < num_rfds; i++) {
665 edma_clean_rfd(pdev, erdr, sw_next_to_clean, i);
666 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
667 }
668
669 cleaned_count += num_rfds;
670 continue;
671 }
672
673 netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
674 if (!netdev) {
675 dev_err(&pdev->dev, "Invalid netdev");
676 for (i = 0; i < num_rfds; i++) {
677 edma_clean_rfd(pdev, erdr, sw_next_to_clean, i);
678 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
679 }
680
681 cleaned_count += num_rfds;
682 continue;
683 }
684 adapter = netdev_priv(netdev);
685
 686 /* This code is added to handle a use case where high
687 * priority stream and a low priority stream are
688 * received simultaneously on DUT. The problem occurs
689 * if one of the Rx rings is full and the corresponding
690 * core is busy with other stuff. This causes ESS CPU
691 * port to backpressure all incoming traffic including
692 * high priority one. We monitor free descriptor count
693 * on each CPU and whenever it reaches threshold (< 80),
694 * we drop all low priority traffic and let only high
 695 * priority traffic pass through. We can hence avoid
 696 * the ESS CPU port sending backpressure on the high
 697 * priority stream.
698 */
699 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
700 & EDMA_RRD_PRIORITY_MASK;
701 if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
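/* Free RFDs left in the ring; the ring size is a power of two,
 * so the wrap-around is done with a mask.
 */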
702 rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
703 if (rfd_avail < EDMA_RFD_AVAIL_THR) {
704 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_REUSE;
705 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
706 adapter->stats.rx_dropped++;
707 cleaned_count++;
708 drop_count++;
709 if (drop_count == 3) {
710 work_to_do--;
711 (*work_done)++;
712 drop_count = 0;
713 }
714 if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
715 /* If buffer clean count reaches 16, we replenish HW buffers. */
716 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
717 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
718 sw_next_to_clean);
719 cleaned_count = ret_count;
 720 erdr->pending_fill = ret_count;
 721 }
722 continue;
723 }
724 }
725
726 work_to_do--;
727 (*work_done)++;
728
729 /* Increment SW index */
730 sw_next_to_clean = (sw_next_to_clean + 1) &
731 (erdr->count - 1);
732
733 /* Get the packet size and allocate buffer */
734 length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
735
736 if (edma_cinfo->page_mode) {
737 /* paged skb */
738 sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length,
739 sw_next_to_clean,
740 erdr, edma_cinfo);
741 if (!pskb_may_pull(skb, ETH_HLEN)) {
742 cleaned_count += num_rfds;
743 dev_kfree_skb_any(skb);
744 continue;
745 }
746 } else {
747 /* single or fraglist skb */
748
 749 /* Reserving 16 bytes is required, as the first 16 bytes
 750 * of the packet hold the RRD, so the actual data
 751 * starts at an offset of 16.
 752 */
753 skb_reserve(skb, 16);
754 if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode))
755 skb_put(skb, length);
756 else
757 sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length,
758 sw_next_to_clean,
759 erdr, edma_cinfo);
760 }
761
762 cleaned_count += num_rfds;
763
764 if (edma_stp_rstp)
765 edma_rx_complete_stp_rstp(skb, port_id, rd);
766
767 skb->protocol = eth_type_trans(skb, netdev);
768
769 /* Record Rx queue for RFS/RPS and fill flow hash from HW */
770 skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
771 if (netdev->features & NETIF_F_RXHASH) {
772 hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
773 if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
774 skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
775 }
776
777#ifdef CONFIG_NF_FLOW_COOKIE
778 skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
779#endif
780 edma_receive_checksum(rd, skb);
781
782 /* Process VLAN HW acceleration indication provided by HW */
783 if (adapter->default_vlan_tag != rd->rrd4) {
784 vlan = rd->rrd4;
785 if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
786 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
787 else if (rd->rrd1 & EDMA_RRD_SVLAN)
788 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
789 }
790
791 /* Update rx statistics */
792 adapter->stats.rx_packets++;
793 adapter->stats.rx_bytes += length;
794
795 /* Check if we reached refill threshold */
796 if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
797 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
798 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
799 sw_next_to_clean);
800 cleaned_count = ret_count;
 801 erdr->pending_fill = ret_count;
 802 }
803
 804 /*
805 * We increment per-precedence counters for the rx packets
806 */
807 if (edma_per_prec_stats_enable) {
808 edma_cinfo->edma_ethstats.rx_prec[priority]++;
809 edma_cinfo->edma_ethstats.rx_ac[edma_dscp2ac_tbl[priority]]++;
 810
811 if (edma_iad_stats_enable) {
812 if (edma_dscp2ac_tbl[priority] == EDMA_AC_VI)
813 edma_iad_process_flow(edma_cinfo, skb, EDMA_INGRESS_DIR, priority);
814 }
 815 }
816
 817 /* At this point skb should go to stack */
818 napi_gro_receive(napi, skb);
819 }
820
821 /* Check if we still have NAPI budget */
822 if (!work_to_do)
823 break;
824
825 /* Read index once again since we still have NAPI budget */
826 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
827 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
828 EDMA_RFD_CONS_IDX_MASK;
829 } while (hw_next_to_clean != sw_next_to_clean);
830
831 erdr->sw_next_to_clean = sw_next_to_clean;
832
833 /* Refill here in case refill threshold wasn't reached */
834 if (likely(cleaned_count)) {
835 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
 836 erdr->pending_fill = ret_count;
837 if (ret_count) {
 838 if (net_ratelimit())
839 dev_dbg(&pdev->dev, "Edma not getting memory for descriptors.\n");
840 }
841
 842 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
843 erdr->sw_next_to_clean);
844 }
 845
846 return erdr->pending_fill;
 847}
848
849/* edma_delete_rfs_filter()
850 * Remove RFS filter from switch
851 */
852static int edma_delete_rfs_filter(struct edma_adapter *adapter,
853 struct edma_rfs_filter_node *filter_node)
854{
855 int res = -1;
856
857 if (likely(adapter->set_rfs_rule))
858 res = (*adapter->set_rfs_rule)(adapter->netdev,
859#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
860 filter_node->keys.src,
861 filter_node->keys.dst, filter_node->keys.port16[0],
862 filter_node->keys.port16[1],
863 filter_node->keys.ip_proto,
864#else
865 filter_node->keys.addrs.v4addrs.src,
866 filter_node->keys.addrs.v4addrs.dst, filter_node->keys.ports.src,
867 filter_node->keys.ports.dst,
868 filter_node->keys.basic.ip_proto,
869#endif
870 filter_node->rq_id,
871 0);
872
873 return res;
874}
875
876/* edma_add_rfs_filter()
877 * Add RFS filter to switch
878 */
879static int edma_add_rfs_filter(struct edma_adapter *adapter,
880 struct flow_keys *keys, u16 rq,
881 struct edma_rfs_filter_node *filter_node)
882{
883 int res = -1;
884
885#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
886 filter_node->keys.src = keys->src;
887 filter_node->keys.dst = keys->dst;
888 filter_node->keys.ports = keys->ports;
889 filter_node->keys.ip_proto = keys->ip_proto;
890#else
891 filter_node->keys.addrs.v4addrs.src = keys->addrs.v4addrs.src;
892 filter_node->keys.addrs.v4addrs.dst = keys->addrs.v4addrs.dst;
893 filter_node->keys.ports.ports = keys->ports.ports;
894 filter_node->keys.basic.ip_proto = keys->basic.ip_proto;
895#endif
896
897 /* Call callback registered by ESS driver */
898 if (likely(adapter->set_rfs_rule))
899#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
900 res = (*adapter->set_rfs_rule)(adapter->netdev, keys->src,
901 keys->dst, keys->port16[0], keys->port16[1],
902 keys->ip_proto, rq, 1);
903#else
904 res = (*adapter->set_rfs_rule)(adapter->netdev, keys->addrs.v4addrs.src,
905 keys->addrs.v4addrs.dst, keys->ports.src, keys->ports.dst,
906 keys->basic.ip_proto, rq, 1);
907#endif
908
909 return res;
910}
911
912/* edma_rfs_key_search()
913 * Look for existing RFS entry
914 */
915static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
916 struct flow_keys *key)
917{
918 struct edma_rfs_filter_node *p;
919
920 hlist_for_each_entry(p, h, node)
921#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
922 if (p->keys.src == key->src &&
923 p->keys.dst == key->dst &&
924 p->keys.ports == key->ports &&
925 p->keys.ip_proto == key->ip_proto)
926#else
927 if (p->keys.addrs.v4addrs.src == key->addrs.v4addrs.src &&
928 p->keys.addrs.v4addrs.dst == key->addrs.v4addrs.dst &&
929 p->keys.ports.ports == key->ports.ports &&
930 p->keys.basic.ip_proto == key->basic.ip_proto)
931#endif
932 return p;
933 return NULL;
934}
935
936/* edma_initialise_rfs_flow_table()
937 * Initialise EDMA RFS flow table
938 */
939static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
940{
941 int i;
942
943 spin_lock_init(&adapter->rfs.rfs_ftab_lock);
944
945 /* Initialize EDMA flow hash table */
946 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
947 INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
948
949 adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
950 adapter->rfs.filter_available = adapter->rfs.max_num_filter;
951 adapter->rfs.hashtoclean = 0;
952
953 /* Add timer to get periodic RFS updates from OS */
954 init_timer(&adapter->rfs.expire_rfs);
955 adapter->rfs.expire_rfs.function = edma_flow_may_expire;
956 adapter->rfs.expire_rfs.data = (unsigned long)adapter;
957 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ/4);
958}
959
960/* edma_free_rfs_flow_table()
961 * Free EDMA RFS flow table
962 */
963static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
964{
965 int i;
966
967 /* Remove sync timer */
968 del_timer_sync(&adapter->rfs.expire_rfs);
969 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
970
971 /* Free EDMA RFS table entries */
972 adapter->rfs.filter_available = 0;
973
974 /* Clean-up EDMA flow hash table */
975 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
976 struct hlist_head *hhead;
977 struct hlist_node *tmp;
978 struct edma_rfs_filter_node *filter_node;
979 int res;
980
981 hhead = &adapter->rfs.hlist_head[i];
982 hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
983 res = edma_delete_rfs_filter(adapter, filter_node);
984 if (res < 0)
985 dev_warn(&adapter->netdev->dev,
986 "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
987 filter_node->flow_id);
988 hlist_del(&filter_node->node);
989 kfree(filter_node);
990 }
991 }
992 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
993}
994
995/* edma_tx_unmap_and_free()
996 * clean TX buffer
997 */
998static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
999 struct edma_sw_desc *sw_desc)
1000{
1001 struct sk_buff *skb = sw_desc->skb;
1002
1003 if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
1004 (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
1005 /* unmap_single for skb head area */
1006 dma_unmap_single(&pdev->dev, sw_desc->dma,
1007 sw_desc->length, DMA_TO_DEVICE);
1008 else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
1009 /* unmap page for paged fragments */
1010 dma_unmap_page(&pdev->dev, sw_desc->dma,
1011 sw_desc->length, DMA_TO_DEVICE);
1012
1013 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
1014 dev_kfree_skb_any(skb);
1015
1016 sw_desc->flags = 0;
1017}
1018
1019/* edma_tx_complete()
1020 * Used to clean tx queues and update hardware and consumer index
1021 */
1022static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
1023{
1024 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1025 struct edma_sw_desc *sw_desc;
1026 struct platform_device *pdev = edma_cinfo->pdev;
1027 int i;
1028
1029 u16 sw_next_to_clean = etdr->sw_next_to_clean;
1030 u16 hw_next_to_clean;
1031 u32 data = 0;
1032
1033 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
1034 hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
1035
1036 /* clean the buffer here */
1037 while (sw_next_to_clean != hw_next_to_clean) {
1038 sw_desc = &etdr->sw_desc[sw_next_to_clean];
1039 edma_tx_unmap_and_free(pdev, sw_desc);
1040 sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
1041 }
1042
1043 etdr->sw_next_to_clean = sw_next_to_clean;
1044
1045 /* update the TPD consumer index register */
1046 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
1047
1048 /* Wake the queue if queue is stopped and netdev link is up */
1049 for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) {
1050 if (netif_tx_queue_stopped(etdr->nq[i])) {
1051 if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
1052 netif_tx_wake_queue(etdr->nq[i]);
1053 }
1054 }
1055}
1056
1057/* edma_get_tx_buffer()
1058 * Get sw_desc corresponding to the TPD
1059 */
1060static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
1061 struct edma_tx_desc *tpd, int queue_id)
1062{
1063 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1064
1065 return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
1066}
1067
1068/* edma_get_next_tpd()
1069 * Return a TPD descriptor for transfer
1070 */
1071static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
1072 int queue_id)
1073{
1074 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1075 u16 sw_next_to_fill = etdr->sw_next_to_fill;
1076 struct edma_tx_desc *tpd_desc =
1077 (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
1078
1079 etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
1080
1081 return tpd_desc;
1082}
1083
1084/* edma_tpd_available()
1085 * Check number of free TPDs
1086 */
1087static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
1088 int queue_id)
1089{
1090 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1091
1092 u16 sw_next_to_fill;
1093 u16 sw_next_to_clean;
1094 u16 count = 0;
1095
1096 sw_next_to_clean = etdr->sw_next_to_clean;
1097 sw_next_to_fill = etdr->sw_next_to_fill;
1098
1099 if (likely(sw_next_to_clean <= sw_next_to_fill))
1100 count = etdr->count;
1101
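/* One descriptor is always left unused so that a completely full ring
 * can be distinguished from an empty one.
 */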
1102 return count + sw_next_to_clean - sw_next_to_fill - 1;
1103}
1104
1105/* edma_tx_queue_get()
1106 * Get the starting number of the queue
1107 */
1108static inline int edma_tx_queue_get(struct edma_adapter *adapter,
1109 struct sk_buff *skb, int txq_id)
1110{
1111 /* skb->priority is used as an index to skb priority table
 1112 * and based on packet priority, the corresponding queue is assigned.
1113 */
1114 return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
1115}
1116
1117/* edma_tx_update_hw_idx()
1118 * update the producer index for the ring transmitted
1119 */
1120static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
1121 struct sk_buff *skb, int queue_id)
1122{
1123 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1124 u32 tpd_idx_data;
1125
1126 /* Read and update the producer index */
1127 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
1128 tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
1129 tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
1130 << EDMA_TPD_PROD_IDX_SHIFT;
1131
1132 edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
1133}
1134
1135/* edma_rollback_tx()
1136 * Function to retrieve tx resources in case of error
1137 */
1138static void edma_rollback_tx(struct edma_adapter *adapter,
1139 struct edma_tx_desc *start_tpd, int queue_id)
1140{
1141 struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
1142 struct edma_sw_desc *sw_desc;
1143 struct edma_tx_desc *tpd = NULL;
1144 u16 start_index, index;
1145
1146 start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
1147
1148 index = start_index;
1149 while (index != etdr->sw_next_to_fill) {
1150 tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
1151 sw_desc = &etdr->sw_desc[index];
1152 edma_tx_unmap_and_free(adapter->pdev, sw_desc);
1153 memset(tpd, 0, sizeof(struct edma_tx_desc));
1154 if (++index == etdr->count)
1155 index = 0;
1156 }
1157 etdr->sw_next_to_fill = start_index;
1158}
1159
 1160/* edma_get_v4_precedence()
1161 * Function to retrieve precedence for IPv4
1162 */
1163static inline int edma_get_v4_precedence(struct sk_buff *skb, int nh_offset, u8 *precedence)
1164{
1165 const struct iphdr *iph;
1166 struct iphdr iph_hdr;
1167
1168 iph = skb_header_pointer(skb, nh_offset, sizeof(iph_hdr), &iph_hdr);
1169
1170 if (!iph || iph->ihl < 5)
1171 return -1;
1172
1173 *precedence = iph->tos >> EDMA_DSCP_PREC_SHIFT;
1174
1175 return 0;
1176}
1177
1178/* edma_get_v6_precedence()
1179 * Function to retrieve precedence for IPv6
1180 */
1181static inline int edma_get_v6_precedence(struct sk_buff *skb, int nh_offset, u8 *precedence)
1182{
1183 const struct ipv6hdr *iph;
1184 struct ipv6hdr iph_hdr;
1185
1186 iph = skb_header_pointer(skb, nh_offset, sizeof(iph_hdr), &iph_hdr);
1187
1188 if (!iph)
1189 return -1;
1190
1191 *precedence = iph->priority >> EDMA_DSCP6_PREC_SHIFT;
1192
1193 return 0;
1194}
1195
1196/* edma_get_skb_precedence()
1197 * Function to retrieve precedence from skb
1198 */
1199static int edma_get_skb_precedence(struct sk_buff *skb, u8 *precedence)
1200{
1201 int nhoff = skb_network_offset(skb);
1202 __be16 proto = skb->protocol;
1203 int ret;
1204 struct pppoeh_proto *pppoeh, ppp_hdr;
1205
1206 switch(proto) {
1207 case __constant_htons(ETH_P_IP): {
1208 ret = edma_get_v4_precedence(skb, nhoff, precedence);
1209 if (ret)
1210 return -1;
1211 break;
1212 }
1213 case __constant_htons(ETH_P_IPV6): {
1214 ret = edma_get_v6_precedence(skb, nhoff, precedence);
1215 if (ret)
1216 return -1;
1217 break;
1218 }
1219 case __constant_htons(ETH_P_PPP_SES): {
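/* PPPoE session traffic: look past the PPPoE header and classify the
 * encapsulated IPv4/IPv6 packet instead.
 */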
1220 pppoeh = skb_header_pointer(skb, nhoff, sizeof(ppp_hdr), &ppp_hdr);
1221 if (!pppoeh)
1222 return -1;
1223
1224 proto = pppoeh->proto;
1225 nhoff += PPPOE_SES_HLEN;
1226 switch (proto) {
1227 case __constant_htons(PPP_IP): {
1228 ret = edma_get_v4_precedence(skb, nhoff, precedence);
1229 if (ret)
1230 return -1;
1231 break;
1232 }
1233 case __constant_htons(PPP_IPV6): {
1234 ret = edma_get_v6_precedence(skb, nhoff, precedence);
1235 if (ret)
1236 return -1;
1237 break;
1238 }
1239 default:
1240 return -1;
1241 }
1242 break;
1243 }
1244 default:
1245 return -1;
1246 }
1247
1248 return 0;
1249}
1250
 1251/* edma_tx_map_and_fill()
1252 * gets called from edma_xmit_frame
1253 *
1254 * This is where the dma of the buffer to be transmitted
1255 * gets mapped
1256 */
1257static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
1258 struct edma_adapter *adapter,
1259 struct sk_buff *skb, int queue_id,
1260 unsigned int flags_transmit,
1261 u16 from_cpu, u16 dp_bitmap,
1262 bool packet_is_rstp, int nr_frags)
1263{
1264 struct edma_sw_desc *sw_desc = NULL;
1265 struct platform_device *pdev = edma_cinfo->pdev;
1266 struct edma_tx_desc *tpd = NULL;
1267 struct edma_tx_desc *start_tpd = NULL;
1268 struct sk_buff *iter_skb;
1269 int i;
1270 u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
1271 u16 buf_len, lso_desc_len = 0;
1272
1273 if (skb_is_gso(skb)) {
1274 /* TODO: What additional checks need to be performed here */
1275 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
1276 lso_word1 |= EDMA_TPD_IPV4_EN;
1277 ip_hdr(skb)->check = 0;
1278 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1279 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1280 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
1281 lso_word1 |= EDMA_TPD_LSO_V2_EN;
1282 ipv6_hdr(skb)->payload_len = 0;
1283 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1284 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1285 } else
1286 return -EINVAL;
1287
1288 lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
1289 (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
1290 } else if (flags_transmit & EDMA_HW_CHECKSUM) {
1291 u8 css, cso;
1292 cso = skb_checksum_start_offset(skb);
1293 css = cso + skb->csum_offset;
1294
1295 word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
1296 word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
1297 word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
1298 }
1299
1300 if (skb->protocol == htons(ETH_P_PPP_SES))
1301 word1 |= EDMA_TPD_PPPOE_EN;
1302
1303 if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
1304 switch (skb->vlan_proto) {
1305 case htons(ETH_P_8021Q):
1306 word3 |= (1 << EDMA_TX_INS_CVLAN);
1307#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1308 word3 |= vlan_tx_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1309#else
1310 word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1311#endif
1312 break;
1313 case htons(ETH_P_8021AD):
1314 word1 |= (1 << EDMA_TX_INS_SVLAN);
1315#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1316 svlan_tag = vlan_tx_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1317#else
1318 svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1319#endif
1320 break;
1321 default:
1322 dev_err(&pdev->dev, "no ctag or stag present\n");
1323 goto vlan_tag_error;
1324 }
1325 } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
1326 word3 |= (1 << EDMA_TX_INS_CVLAN);
1327 word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
1328 }
1329
1330 if (packet_is_rstp) {
1331 word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1332 word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
1333 } else {
1334 word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1335 }
1336
1337 buf_len = skb_headlen(skb);
1338
1339 if (lso_word1) {
1340 if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
1341
1342 /* IPv6 LSOv2 descriptor */
1343 start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1344 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1345 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
1346
1347 /* LSOv2 descriptor overrides addr field to pass length */
1348 tpd->addr = cpu_to_le16(skb->len);
1349 tpd->svlan_tag = svlan_tag;
1350 tpd->word1 = word1 | lso_word1;
1351 tpd->word3 = word3;
1352 }
1353
1354 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1355 if (!start_tpd)
1356 start_tpd = tpd;
1357 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1358
 1359 /* The last buffer info contains the skb address,
 1360 * so the skb will be freed after unmap
 1361 */
1362 sw_desc->length = lso_desc_len;
1363 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1364
1365 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1366 skb->data, buf_len, DMA_TO_DEVICE);
1367 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1368 goto dma_error;
1369
1370 tpd->addr = cpu_to_le32(sw_desc->dma);
1371 tpd->len = cpu_to_le16(buf_len);
1372
1373 tpd->svlan_tag = svlan_tag;
1374 tpd->word1 = word1 | lso_word1;
1375 tpd->word3 = word3;
1376
 1377 /* The last buffer info contains the skb address,
 1378 * so it will be freed after unmap
 1379 */
1380 sw_desc->length = lso_desc_len;
1381 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1382
1383 buf_len = 0;
1384 }
1385
1386 if (likely(buf_len)) {
1387
1388 /* TODO Do not dequeue descriptor if there is a potential error */
1389 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1390
1391 if (!start_tpd)
1392 start_tpd = tpd;
1393
1394 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1395
 1396 /* The last buffer info contains the skb address,
 1397 * so it will be freed after unmap
 1398 */
1399 sw_desc->length = buf_len;
1400 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1401 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1402 skb->data, buf_len, DMA_TO_DEVICE);
1403 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1404 goto dma_error;
1405
1406 tpd->addr = cpu_to_le32(sw_desc->dma);
1407 tpd->len = cpu_to_le16(buf_len);
1408
1409 tpd->svlan_tag = svlan_tag;
1410 tpd->word1 = word1 | lso_word1;
1411 tpd->word3 = word3;
1412 }
1413
1414 i = 0;
1415
1416 /* Walk through paged frags for head skb */
1417 while (nr_frags--) {
1418 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1419 buf_len = skb_frag_size(frag);
1420 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1421 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1422 sw_desc->length = buf_len;
1423 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1424
1425 sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
1426
1427 if (dma_mapping_error(NULL, sw_desc->dma))
1428 goto dma_error;
1429
1430 tpd->addr = cpu_to_le32(sw_desc->dma);
1431 tpd->len = cpu_to_le16(buf_len);
1432
1433 tpd->svlan_tag = svlan_tag;
1434 tpd->word1 = word1 | lso_word1;
1435 tpd->word3 = word3;
1436 i++;
1437 }
1438
1439 /* Walk through all fraglist skbs */
1440 skb_walk_frags(skb, iter_skb) {
1441 buf_len = iter_skb->len;
1442 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1443 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1444 sw_desc->length = buf_len;
1445 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1446 iter_skb->data, buf_len, DMA_TO_DEVICE);
1447
1448 if (dma_mapping_error(NULL, sw_desc->dma))
1449 goto dma_error;
1450
1451 tpd->addr = cpu_to_le32(sw_desc->dma);
1452 tpd->len = cpu_to_le16(buf_len);
1453 tpd->svlan_tag = svlan_tag;
1454 tpd->word1 = word1 | lso_word1;
1455 tpd->word3 = word3;
1456 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
1457
1458 i = 0;
1459
1460 nr_frags = skb_shinfo(iter_skb)->nr_frags;
1461
1462 /* Walk through paged frags for this fraglist skb */
1463 while (nr_frags--) {
1464 skb_frag_t *frag = &skb_shinfo(iter_skb)->frags[i];
1465 buf_len = skb_frag_size(frag);
1466 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1467 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1468 sw_desc->length = buf_len;
1469 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1470
1471 sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag,
1472 0, buf_len, DMA_TO_DEVICE);
1473 if (dma_mapping_error(NULL, sw_desc->dma))
1474 goto dma_error;
1475
1476 tpd->addr = cpu_to_le32(sw_desc->dma);
1477 tpd->len = cpu_to_le16(buf_len);
1478 tpd->svlan_tag = svlan_tag;
1479 tpd->word1 = word1 | lso_word1;
1480 tpd->word3 = word3;
1481 i++;
1482 }
1483 }
1484
 1485 /* If sysctl support for per-precedence stats is enabled */
 1486 if (edma_per_prec_stats_enable) {
 1487 uint8_t precedence = 0;
 1488
 1489 if (!edma_get_skb_precedence(skb, &precedence)) {
 1490 /* Increment per-precedence counters for tx packets
1491 * and set the precedence in the TPD.
1492 */
1493 edma_cinfo->edma_ethstats.tx_prec[precedence]++;
1494 edma_cinfo->edma_ethstats.tx_ac[edma_dscp2ac_tbl[precedence]]++;
 1495 if (tpd)
 1496 tpd->word3 |= precedence << EDMA_TPD_PRIO_SHIFT;
 1497 }
 1498
 1499 /* If sysctl support for IAD stats is enabled */
1500 if (edma_iad_stats_enable) {
1501 if (edma_dscp2ac_tbl[precedence] == EDMA_AC_VI)
1502 edma_iad_process_flow(edma_cinfo, skb, EDMA_EGRESS_DIR, precedence);
1503 }
 1504 }
1505
 1506 /* If tpd or sw_desc is still uninitialized then we need to return */
1507 if ((!tpd) || (!sw_desc))
1508 return -EINVAL;
1509
1510 tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
1511
1512 sw_desc->skb = skb;
1513 sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
1514
1515 return 0;
1516
1517dma_error:
1518 edma_rollback_tx(adapter, start_tpd, queue_id);
1519 dev_err(&pdev->dev, "TX DMA map failed\n");
1520vlan_tag_error:
1521 return -ENOMEM;
1522}
1523
1524/* edma_check_link()
1525 * check Link status
1526 */
1527static int edma_check_link(struct edma_adapter *adapter)
1528{
1529 struct phy_device *phydev = adapter->phydev;
1530
1531 if (!(adapter->poll_required))
1532 return __EDMA_LINKUP;
1533
1534 if (phydev->link)
1535 return __EDMA_LINKUP;
1536
1537 return __EDMA_LINKDOWN;
1538}
1539
1540/* edma_adjust_link()
1541 * check for edma link status
1542 */
1543void edma_adjust_link(struct net_device *netdev)
1544{
1545 int status;
1546 struct edma_adapter *adapter = netdev_priv(netdev);
1547 struct phy_device *phydev = adapter->phydev;
1548
1549 if (!test_bit(__EDMA_UP, &adapter->state_flags))
1550 return;
1551
1552 status = edma_check_link(adapter);
1553
1554 if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
1555 dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
1556 adapter->link_state = __EDMA_LINKUP;
1557 netif_carrier_on(netdev);
1558 if (netif_running(netdev))
1559 netif_tx_wake_all_queues(netdev);
1560 } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
1561 dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
1562 adapter->link_state = __EDMA_LINKDOWN;
1563 netif_carrier_off(netdev);
1564 netif_tx_stop_all_queues(netdev);
1565 }
1566}
1567
 1568/* edma_get_stats64()
 1569 * Statistics API used to retrieve the tx/rx statistics
1570 */
 1571struct rtnl_link_stats64 *edma_get_stats64(struct net_device *netdev,
1572 struct rtnl_link_stats64 *stats)
 1573{
1574 struct edma_adapter *adapter = netdev_priv(netdev);
1575
 1576 memcpy(stats, &adapter->stats, sizeof(*stats));
1577
1578 return stats;
 1579}
1580
1581/* edma_xmit()
1582 * Main api to be called by the core for packet transmission
1583 */
1584netdev_tx_t edma_xmit(struct sk_buff *skb,
1585 struct net_device *net_dev)
1586{
1587 struct edma_adapter *adapter = netdev_priv(net_dev);
1588 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1589 struct edma_tx_desc_ring *etdr;
1590 u16 from_cpu = 0, dp_bitmap = 0, txq_id;
1591 int ret, nr_frags_first = 0, num_tpds_needed = 1, queue_id = 0;
1592 unsigned int flags_transmit = 0;
1593 bool packet_is_rstp = false;
1594 struct netdev_queue *nq = NULL;
1595
1596 if (skb_shinfo(skb)->nr_frags) {
1597 nr_frags_first = skb_shinfo(skb)->nr_frags;
1598
 1599 /* It is unlikely the check below hits; BUG_ON if it does */
1600 BUG_ON(nr_frags_first > MAX_SKB_FRAGS);
1601
1602 num_tpds_needed += nr_frags_first;
1603 }
1604
1605 if (skb_has_frag_list(skb)) {
1606 struct sk_buff *iter_skb;
1607
1608 /* Walk through fraglist skbs making a note of nr_frags */
1609 skb_walk_frags(skb, iter_skb) {
1610 unsigned char nr_frags = skb_shinfo(iter_skb)->nr_frags;
1611
 1612 /* It is unlikely the check below hits; BUG_ON if it does */
1613 BUG_ON(nr_frags > MAX_SKB_FRAGS);
1614
1615 /* One TPD for skb->data and more for nr_frags */
1616 num_tpds_needed += (1 + nr_frags);
1617 }
1618 }
1619
1620 if (edma_stp_rstp) {
1621 u16 ath_hdr, ath_eth_type;
1622 u8 mac_addr[EDMA_ETH_HDR_LEN];
1623 ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
1624 if (ath_eth_type == edma_ath_eth_type) {
1625 packet_is_rstp = true;
1626 ath_hdr = htons(*(uint16_t *)&skb->data[14]);
1627 dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
1628 from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
1629 memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
1630
1631 skb_pull(skb, 4);
1632
1633 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
1634 }
1635 }
1636
1637 /* this will be one of the 4 TX queues exposed to linux kernel */
1638 txq_id = skb_get_queue_mapping(skb);
1639 queue_id = edma_tx_queue_get(adapter, skb, txq_id);
1640 etdr = edma_cinfo->tpd_ring[queue_id];
1641 nq = netdev_get_tx_queue(net_dev, txq_id);
1642
1643 local_bh_disable();
1644 /* Tx is not handled in bottom half context. Hence, we need to protect
1645 * Tx from tasks and bottom half
1646 */
1647
1648 if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
 1649 if (edma_disable_queue_stop) {
1650 local_bh_enable();
1651 dev_dbg(&net_dev->dev, "Packet dropped as queue is full");
1652 dev_kfree_skb_any(skb);
1653 adapter->stats.tx_errors++;
1654 return NETDEV_TX_OK;
1655 }
1656
 1657 /* not enough descriptors, just stop the queue */
1658 netif_tx_stop_queue(nq);
1659 local_bh_enable();
1660 dev_dbg(&net_dev->dev, "Not enough descriptors available");
1661 edma_cinfo->edma_ethstats.tx_desc_error++;
1662 return NETDEV_TX_BUSY;
1663 }
1664
1665 /* Check and mark VLAN tag offload */
1666#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1667 if (vlan_tx_tag_present(skb))
1668#else
1669 if (skb_vlan_tag_present(skb))
1670#endif
1671 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
1672 else if (adapter->default_vlan_tag)
1673 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
1674
1675 /* Check and mark checksum offload */
1676 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
1677 flags_transmit |= EDMA_HW_CHECKSUM;
1678
1679 /* Map and fill descriptor for Tx */
1680 ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
1681 flags_transmit, from_cpu, dp_bitmap,
1682 packet_is_rstp, nr_frags_first);
1683 if (ret) {
1684 dev_kfree_skb_any(skb);
1685 adapter->stats.tx_errors++;
1686 goto netdev_okay;
1687 }
1688
1689 /* Update SW producer index */
1690 edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
1691
1692 /* update tx statistics */
1693 adapter->stats.tx_packets++;
1694 adapter->stats.tx_bytes += skb->len;
1695
1696netdev_okay:
1697 local_bh_enable();
1698 return NETDEV_TX_OK;
1699}
1700
1701/*
1702 * edma_flow_may_expire()
1703 * Timer function called periodically to delete the node
1704 */
1705void edma_flow_may_expire(unsigned long data)
1706{
1707 struct edma_adapter *adapter = (struct edma_adapter *)data;
1708 int j;
1709
1710 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1711 for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
1712 struct hlist_head *hhead;
1713 struct hlist_node *tmp;
1714 struct edma_rfs_filter_node *n;
1715 bool res;
1716
1717 hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
1718 hlist_for_each_entry_safe(n, tmp, hhead, node) {
1719 res = rps_may_expire_flow(adapter->netdev, n->rq_id,
1720 n->flow_id, n->filter_id);
1721 if (res) {
1722 res = edma_delete_rfs_filter(adapter, n);
1723 if (res < 0)
1724 dev_dbg(&adapter->netdev->dev,
1725 "RFS entry %d not allowed to be flushed by Switch",
1726 n->flow_id);
1727 else {
1728 hlist_del(&n->node);
1729 kfree(n);
1730 adapter->rfs.filter_available++;
1731 }
1732 }
1733 }
1734 }
1735
1736 adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
1737 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1738 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ/4);
1739}
1740
1741/* edma_rx_flow_steer()
 1742 * Called by the core to steer the flow to a CPU
1743 */
1744int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1745 u16 rxq, u32 flow_id)
1746{
1747 struct flow_keys keys;
1748 struct edma_rfs_filter_node *filter_node;
1749 struct edma_adapter *adapter = netdev_priv(dev);
1750 u16 hash_tblid;
1751 int res;
1752
1753 if (skb->protocol == htons(ETH_P_IPV6)) {
1754 res = -EPROTONOSUPPORT;
1755 goto no_protocol_err;
1756 }
1757
1758 /* Dissect flow parameters
1759 * We only support IPv4 + TCP/UDP
1760 */
1761#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1762 res = skb_flow_dissect(skb, &keys);
1763 if (!((keys.ip_proto == IPPROTO_TCP) || (keys.ip_proto == IPPROTO_UDP))) {
1764#else
1765 res = skb_flow_dissect_flow_keys(skb, &keys, 0);
1766 if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
1767#endif
1768 res = -EPROTONOSUPPORT;
1769 goto no_protocol_err;
1770 }
1771
1772 /* Check if table entry exists */
1773 hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
1774
1775 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1776 filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
1777
1778 if (filter_node) {
1779 if (rxq == filter_node->rq_id) {
1780 res = -EEXIST;
1781 goto out;
1782 } else {
1783 res = edma_delete_rfs_filter(adapter, filter_node);
1784 if (res < 0)
1785 dev_warn(&adapter->netdev->dev,
1786 "Cannot steer flow %d to different queue",
1787 filter_node->flow_id);
1788 else {
1789 adapter->rfs.filter_available++;
1790 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1791 if (res < 0) {
1792 dev_warn(&adapter->netdev->dev,
1793 "Cannot steer flow %d to different queue",
1794 filter_node->flow_id);
1795 } else {
1796 adapter->rfs.filter_available--;
1797 filter_node->rq_id = rxq;
1798 filter_node->filter_id = res;
1799 }
1800 }
1801 }
1802 } else {
1803 if (adapter->rfs.filter_available == 0) {
1804 res = -EBUSY;
1805 goto out;
1806 }
1807
1808 filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
1809 if (!filter_node) {
1810 res = -ENOMEM;
1811 goto out;
1812 }
1813
1814 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1815 if (res < 0) {
1816 kfree(filter_node);
1817 goto out;
1818 }
1819
1820 adapter->rfs.filter_available--;
1821 filter_node->rq_id = rxq;
1822 filter_node->filter_id = res;
1823 filter_node->flow_id = flow_id;
1824 filter_node->keys = keys;
1825 INIT_HLIST_NODE(&filter_node->node);
1826 hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
1827 }
1828
1829out:
1830 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1831no_protocol_err:
1832 return res;
1833}
1834
1835#ifdef CONFIG_RFS_ACCEL
1836/* edma_register_rfs_filter()
1837 * Add RFS filter callback
1838 */
1839int edma_register_rfs_filter(struct net_device *netdev,
1840 set_rfs_filter_callback_t set_filter)
1841{
1842 struct edma_adapter *adapter = netdev_priv(netdev);
1843
1844 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1845
1846 if (adapter->set_rfs_rule) {
1847 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1848 return -1;
1849 }
1850
1851 adapter->set_rfs_rule = set_filter;
1852 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1853
1854 return 0;
1855}
1856#endif
1857
1858/* edma_select_xps_queue()
1859 *	Called by the Linux Tx stack to select the Tx queue for a packet
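 *	Matches the .ndo_select_queue prototype used on these kernel versions;
 *	returning the current CPU id steers transmits to a per-CPU Tx queue.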
1860 */
1861u16 edma_select_xps_queue(struct net_device *dev, struct sk_buff *skb,
1862 void *accel_priv, select_queue_fallback_t fallback)
1863{
1864#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1865 return smp_processor_id();
1866#else
1867 int cpu = get_cpu();
1868 put_cpu();
1869
1870 return cpu;
1871#endif
1872}
1873
1874/* edma_alloc_tx_rings()
1875 * Allocate Tx descriptor rings
1876 */
1877int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
1878{
1879 struct platform_device *pdev = edma_cinfo->pdev;
1880 int i, err = 0;
1881
1882 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1883 err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1884 if (err) {
1885 dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
1886 return err;
1887 }
1888 }
1889
1890 return 0;
1891}
1892
1893/* edma_free_tx_rings()
1894 * Free tx rings
1895 */
1896void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
1897{
1898 int i;
1899
1900 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1901 edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1902}
1903
1904/* edma_free_tx_resources()
1905 * Free buffers associated with tx rings
1906 */
1907void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
1908{
1909 struct edma_tx_desc_ring *etdr;
1910 struct edma_sw_desc *sw_desc;
1911 struct platform_device *pdev = edma_cinfo->pdev;
1912 int i, j;
1913
1914 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1915 etdr = edma_cinfo->tpd_ring[i];
Rakesh Nair3a756882017-11-15 12:18:21 +05301916 for (j = 0; j < edma_cinfo->tx_ring_count; j++) {
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301917 sw_desc = &etdr->sw_desc[j];
1918 if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
1919 EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
1920 edma_tx_unmap_and_free(pdev, sw_desc);
1921 }
1922 }
1923}
1924
1925/* edma_alloc_rx_rings()
1926 * Allocate rx rings
1927 */
1928int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
1929{
1930 struct platform_device *pdev = edma_cinfo->pdev;
1931 int i, j, err = 0;
1932
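	/* With 4 Rx queues the rings occupy every other index (0, 2, 4, 6);
	 * with 8 Rx queues the mapping is contiguous, hence the stride on j.
	 */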
1933 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1934 err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1935 if (err) {
1936			dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
1937 return err;
1938 }
1939 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1940 }
1941
1942 return 0;
1943}
1944
1945/* edma_free_rx_rings()
1946 * free rx rings
1947 */
1948void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
1949{
1950 int i, j;
1951
1952 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1953 edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1954 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1955 }
1956}
1957
1958/* edma_free_queues()
1959 * Free the allocated queues
1960 */
1961void edma_free_queues(struct edma_common_info *edma_cinfo)
1962{
1963	int i, j;
1964
1965 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1966 if (edma_cinfo->tpd_ring[i])
1967 kfree(edma_cinfo->tpd_ring[i]);
1968 edma_cinfo->tpd_ring[i] = NULL;
1969 }
1970
1971 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1972 if (edma_cinfo->rfd_ring[j])
1973 kfree(edma_cinfo->rfd_ring[j]);
1974 edma_cinfo->rfd_ring[j] = NULL;
1975 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1976 }
1977
1978 edma_cinfo->num_rx_queues = 0;
1979 edma_cinfo->num_tx_queues = 0;
1980
1981 return;
1982}
1983
1984/* edma_free_rx_resources()
1985 * Free buffers associated with rx rings
1986 */
1987void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
1988{
1989 struct edma_rfd_desc_ring *erdr;
1990 struct platform_device *pdev = edma_cinfo->pdev;
1991 int i, j, k;
1992
1993 for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
1994 erdr = edma_cinfo->rfd_ring[k];
Rakesh Nair3a756882017-11-15 12:18:21 +05301995 for (j = 0; j < edma_cinfo->rx_ring_count; j++) {
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301996 /* unmap all descriptors while cleaning */
1997 edma_clean_rfd(pdev, erdr, j, 1);
1998 }
1999 k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2000
2001 }
2002}
2003
2004/* edma_alloc_queues_tx()
2005 *	Allocate memory for all Tx rings
2006 */
2007int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
2008{
2009 int i;
2010
2011 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
2012 struct edma_tx_desc_ring *etdr;
2013 etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
2014 if (!etdr)
2015 goto err;
2016 etdr->count = edma_cinfo->tx_ring_count;
2017 edma_cinfo->tpd_ring[i] = etdr;
2018 }
2019
2020 return 0;
2021err:
2022 edma_free_queues(edma_cinfo);
2023 return -1;
2024}
2025
2026/* edma_alloc_queues_rx()
2027 *	Allocate memory for all Rx rings
2028 */
2029int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
2030{
2031 int i, j;
2032
2033 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
2034 struct edma_rfd_desc_ring *rfd_ring;
2035 rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
2036 GFP_KERNEL);
2037 if (!rfd_ring)
2038 goto err;
2039 rfd_ring->count = edma_cinfo->rx_ring_count;
2040 edma_cinfo->rfd_ring[j] = rfd_ring;
2041 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2042 }
2043 return 0;
2044err:
2045 edma_free_queues(edma_cinfo);
2046 return -1;
2047}
2048
2049/* edma_clear_irq_status()
2050 * Clear interrupt status
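 *	The ISR registers are write-1-to-clear: 0xff covers the Rx queue bits
 *	and 0xffff the Tx queue bits.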
2051 */
2052void edma_clear_irq_status(void)
2053{
2054 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
2055 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
2056 edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
2057 edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
2058}
2059
2060/* edma_configure()
2061 *	Configure Rx buffers, EDMA interrupts and control registers.
2062 */
2063int edma_configure(struct edma_common_info *edma_cinfo)
2064{
2065 struct edma_hw *hw = &edma_cinfo->hw;
2066 u32 intr_modrt_data;
2067 u32 intr_ctrl_data = 0;
2068 int i, j, ret_count;
2069
2070 edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
2071 intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
2072 intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
2073 edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
2074
2075 edma_clear_irq_status();
2076
2077 /* Clear any WOL status */
2078 edma_write_reg(EDMA_REG_WOL_CTRL, 0);
2079 intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
2080 intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
2081 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
2082 edma_configure_tx(edma_cinfo);
2083 edma_configure_rx(edma_cinfo);
2084
2085 /* Allocate the RX buffer */
2086 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
2087 struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
2088 ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
2089 if (ret_count)
2090 dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
Rakesh Nair03b586c2017-04-03 18:28:58 +05302091 ring->pending_fill = ret_count;
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302092 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2093 }
2094
2095 /* Configure descriptor Ring */
2096 edma_init_desc(edma_cinfo);
2097 return 0;
2098}
2099
2100/* edma_irq_enable()
2101 * Enable default interrupt generation settings
2102 */
2103void edma_irq_enable(struct edma_common_info *edma_cinfo)
2104{
2105 struct edma_hw *hw = &edma_cinfo->hw;
2106 int i, j;
2107
2108 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
2109 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
2110 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
2111 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2112 }
2113 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
2114 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
2115 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
2116}
2117
2118/* edma_irq_disable()
2119 * Disable Interrupt
2120 */
2121void edma_irq_disable(struct edma_common_info *edma_cinfo)
2122{
2123 int i;
2124
2125 for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
2126 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);
2127
2128 for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
2129 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
2130 edma_write_reg(EDMA_REG_MISC_IMR, 0);
2131 edma_write_reg(EDMA_REG_WOL_IMR, 0);
2132}
2133
2134/* edma_free_irqs()
2135 * Free All IRQs
2136 */
2137void edma_free_irqs(struct edma_adapter *adapter)
2138{
2139 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
2140 int i, j;
2141 int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);
2142
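	/* Each core owns four Tx completion IRQs starting at tx_comp_start and
	 * k Rx IRQs starting at rx_start, where k is 1 when 4 Rx queues are
	 * configured and 2 otherwise.
	 */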
2143 for (i = 0; i < CONFIG_NR_CPUS; i++) {
Rakesh Nair8016fbd2018-01-03 15:46:06 +05302144 for (j = edma_cinfo->edma_percpu_info[i].tx_comp_start; j < (edma_cinfo->edma_percpu_info[i].tx_comp_start + 4); j++)
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302145 free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);
2146
2147 for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
2148 free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
2149 }
2150}
2151
2152/* edma_enable_rx_ctrl()
2153 * Enable RX queue control
2154 */
2155void edma_enable_rx_ctrl(struct edma_hw *hw)
2156{
2157 u32 data;
2158
2159 edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
2160 data |= EDMA_RXQ_CTRL_EN;
2161 edma_write_reg(EDMA_REG_RXQ_CTRL, data);
2162}
2163
2164
2165/* edma_enable_tx_ctrl()
2166 * Enable TX queue control
2167 */
2168void edma_enable_tx_ctrl(struct edma_hw *hw)
2169{
2170 u32 data;
2171
2172 edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
2173 data |= EDMA_TXQ_CTRL_TXQ_EN;
2174 edma_write_reg(EDMA_REG_TXQ_CTRL, data);
2175}
2176
2177/* edma_stop_rx_tx()
2178 * Disable RX/TX queue control
2179 */
2180void edma_stop_rx_tx(struct edma_hw *hw)
2181{
2182 u32 data;
2183
2184 edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
2185 data &= ~EDMA_RXQ_CTRL_EN;
2186 edma_write_reg(EDMA_REG_RXQ_CTRL, data);
2187 edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
2188 data &= ~EDMA_TXQ_CTRL_TXQ_EN;
2189 edma_write_reg(EDMA_REG_TXQ_CTRL, data);
2190}
2191
2192/* edma_reset()
2193 * Reset the EDMA
2194 */
2195int edma_reset(struct edma_common_info *edma_cinfo)
2196{
2197 struct edma_hw *hw = &edma_cinfo->hw;
2198
2199 edma_irq_disable(edma_cinfo);
2200
2201 edma_clear_irq_status();
2202
2203 edma_stop_rx_tx(hw);
2204
2205 return 0;
2206}
2207
2208/* edma_fill_netdev()
2209 * Fill netdev for each etdr
2210 */
2211int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
2212 int dev, int txq_id)
2213{
2214 struct edma_tx_desc_ring *etdr;
2215 int i = 0;
2216
2217 etdr = edma_cinfo->tpd_ring[queue_id];
2218
2219	while ((i < EDMA_MAX_NETDEV_PER_QUEUE) && etdr->netdev[i])
2220		i++;
2221
2222 if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
2223 return -1;
2224
2225 /* Populate the netdev associated with the tpd ring */
2226 etdr->netdev[i] = edma_netdev[dev];
2227 etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);
2228
2229 return 0;
2230}
2231
2232/* edma_change_mtu()
2233 * change the MTU of the NIC.
2234 */
2235int edma_change_mtu(struct net_device *netdev, int new_mtu)
2236{
2237 struct edma_adapter *adapter = netdev_priv(netdev);
2238 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
2239 int old_mtu = netdev->mtu;
2240 int max_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + (2 * VLAN_HLEN);
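	/* max_frame_size accounts for the Ethernet header, FCS and two VLAN
	 * tags (double-tagged frames); the check below bounds it by
	 * EDMA_MAX_JUMBO_FRAME_SIZE.
	 */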
2241
2242 if ((max_frame_size < ETH_ZLEN + ETH_FCS_LEN) ||
2243 (max_frame_size > EDMA_MAX_JUMBO_FRAME_SIZE)) {
2244 dev_err(&edma_cinfo->pdev->dev, "MTU setting not correct\n");
2245 return -EINVAL;
2246 }
2247
2248 /* set MTU */
2249 if (old_mtu != new_mtu) {
2250 netdev->mtu = new_mtu;
2251 netdev_update_features(netdev);
2252 }
2253
2254 return 0;
2255}
2256
2257/* edma_set_mac_addr()
2258 * Change the Ethernet Address of the NIC
2259 */
2260int edma_set_mac_addr(struct net_device *netdev, void *p)
2261{
2262 struct sockaddr *addr = p;
2263
2264 if (!is_valid_ether_addr(addr->sa_data))
2265 return -EINVAL;
2266
2267 if (netif_running(netdev))
2268 return -EBUSY;
2269
2270 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2271 return 0;
2272}
2273
2274/* edma_set_stp_rstp()
2275 * set stp/rstp
2276 */
2277void edma_set_stp_rstp(bool rstp)
2278{
2279 edma_stp_rstp = rstp;
2280}
2281
2282/* edma_assign_ath_hdr_type()
2283 * assign atheros header eth type
2284 */
2285void edma_assign_ath_hdr_type(int eth_type)
2286{
2287 edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
2288}
2289
2290/* edma_get_default_vlan_tag()
2291 * Used by other modules to get the default vlan tag
2292 */
2293int edma_get_default_vlan_tag(struct net_device *netdev)
2294{
2295 struct edma_adapter *adapter = netdev_priv(netdev);
2296
2297 if (adapter->default_vlan_tag)
2298 return adapter->default_vlan_tag;
2299
2300 return 0;
2301}
2302
2303/* edma_open()
2304 * gets called when the netdevice is brought up; starts the queues.
2305 */
2306int edma_open(struct net_device *netdev)
2307{
2308 struct edma_adapter *adapter = netdev_priv(netdev);
2309 struct platform_device *pdev = adapter->edma_cinfo->pdev;
2310
2311 netif_tx_start_all_queues(netdev);
2312 edma_initialise_rfs_flow_table(adapter);
2313 set_bit(__EDMA_UP, &adapter->state_flags);
2314
2315	/* If link polling is enabled (in our case, for the WAN port),
2316	 * do a phy start; otherwise always mark the link as up
2317 */
Rakesh Naired29f6b2017-04-04 15:48:08 +05302318 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302319 if (adapter->poll_required) {
2320 if (!IS_ERR(adapter->phydev)) {
2321 phy_start(adapter->phydev);
2322 phy_start_aneg(adapter->phydev);
2323 adapter->link_state = __EDMA_LINKDOWN;
2324 } else {
2325 dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
2326 }
2327 } else {
2328 adapter->link_state = __EDMA_LINKUP;
2329 netif_carrier_on(netdev);
2330 }
Rakesh Naired29f6b2017-04-04 15:48:08 +05302331 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302332
2333 return 0;
2334}
2335
2336
2337/* edma_close()
2338 * gets called when the netdevice is brought down; stops the queues.
2339 */
2340int edma_close(struct net_device *netdev)
2341{
2342 struct edma_adapter *adapter = netdev_priv(netdev);
2343
2344 edma_free_rfs_flow_table(adapter);
2345 netif_carrier_off(netdev);
2346 netif_tx_stop_all_queues(netdev);
2347
Rakesh Naired29f6b2017-04-04 15:48:08 +05302348 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302349 if (adapter->poll_required) {
2350 if (!IS_ERR(adapter->phydev))
2351 phy_stop(adapter->phydev);
2352 }
Rakesh Naired29f6b2017-04-04 15:48:08 +05302353 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302354
2355 adapter->link_state = __EDMA_LINKDOWN;
2356
2357	/* Clear the GMAC UP state before the link state is checked
2358 */
2359 clear_bit(__EDMA_UP, &adapter->state_flags);
2360
2361 return 0;
2362}
2363
2364/* edma_poll()
2365 * polling function that gets called when the napi gets scheduled.
2366 *
2367 * Main sequence of tasks performed in this API:
2368 * clear irq status -> clean tx irq -> clean rx irq ->
2369 * re-enable interrupts.
2370 */
2371int edma_poll(struct napi_struct *napi, int budget)
2372{
2373 struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
2374 struct edma_per_cpu_queues_info, napi);
2375 struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2376 u32 reg_data;
2377 u32 shadow_rx_status, shadow_tx_status;
2378 int queue_id;
2379 int i, work_done = 0;
Rakesh Nair03b586c2017-04-03 18:28:58 +05302380 u16 rx_pending_fill;
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302381
2382 /* Store the Rx/Tx status by ANDing it with
2383	 * the appropriate per-CPU Rx/Tx mask
2384 */
2385 edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
2386 edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
2387 shadow_rx_status = edma_percpu_info->rx_status;
2388 edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
2389 edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
2390 shadow_tx_status = edma_percpu_info->tx_status;
2391
2392	/* Every core owns four Tx completion queues whose base is computed
2393	 * at probe time and stored in the per-CPU edma_percpu_info
2394	 * structure. The Tx status word has already been masked with this
2395	 * core's tx_mask above, so use ffs() to pick each pending
2396	 * completion queue in turn and run edma_tx_complete() on it.
2397	 * The corresponding status bit is cleared once the queue is done.
2398 */
2399 while (edma_percpu_info->tx_status) {
2400 queue_id = ffs(edma_percpu_info->tx_status) - 1;
2401 edma_tx_complete(edma_cinfo, queue_id);
2402 edma_percpu_info->tx_status &= ~(1 << queue_id);
2403 }
2404
2405	/* Likewise for Rx: the Rx status word was masked with this core's
2406	 * rx_mask above, so use ffs() to find each pending Rx queue. If
2407	 * the Rx ring could not be fully refilled (rx_pending_fill is
2408	 * non-zero), claim the whole budget and keep the status bit set
2409	 * so that NAPI polls again soon and the refill is retried;
2410	 * otherwise clear this queue's status bit and move on.
2411 */
2412 while (edma_percpu_info->rx_status) {
2413 queue_id = ffs(edma_percpu_info->rx_status) - 1;
Rakesh Nair03b586c2017-04-03 18:28:58 +05302414 rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302415 budget, queue_id, napi);
2416
Rakesh Nair03b586c2017-04-03 18:28:58 +05302417 if (likely(work_done < budget)) {
2418 if (rx_pending_fill) {
2419 work_done = budget;
2420 break;
2421 }
2422
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302423 edma_percpu_info->rx_status &= ~(1 << queue_id);
Rakesh Nair03b586c2017-04-03 18:28:58 +05302424 }
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302425 else
2426 break;
2427 }
2428
2429	/* Clear the status register to prevent the interrupts from
2430	 * recurring. This clearing of the interrupt status register is
2431	 * done here only after the producer/consumer index has been
2432	 * updated to
2433 * reflect that the packet transmission/reception went fine.
2434 */
2435 edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
2436 edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
2437
2438	/* If the budget is not fully consumed, exit polling mode */
2439 if (likely(work_done < budget)) {
2440 napi_complete(napi);
2441
2442 /* re-enable the interrupts */
2443 for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2444 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
2445 for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
Rakesh Nair8016fbd2018-01-03 15:46:06 +05302446 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_comp_start + i), 0x1);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302447 }
2448
2449 return work_done;
2450}
2451
2452/* edma_interrupt()
2453 * interrupt handler
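 *	Masks this core's Rx/Tx queue interrupts and schedules NAPI;
 *	edma_poll() re-enables them when the poll completes under budget.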
2454 */
2455irqreturn_t edma_interrupt(int irq, void *dev)
2456{
2457 struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
2458 struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2459 int i;
2460
2461 /* Unmask the TX/RX interrupt register */
2462 for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2463 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
2464
2465 for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
Rakesh Nair8016fbd2018-01-03 15:46:06 +05302466 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_comp_start + i), 0x0);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302467
2468 napi_schedule(&edma_percpu_info->napi);
2469
2470 return IRQ_HANDLED;
2471}