blob: 0285de3909348eb60547495bf7f6e999d5d8792d [file] [log] [blame]
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301/*
2 * Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16#include <linux/platform_device.h>
17#include <linux/if_vlan.h>
Rakesh Nair888af952017-06-30 18:41:58 +053018#include <linux/kernel.h>
Rakesh Nair9bcf2602017-01-06 16:02:16 +053019#include "ess_edma.h"
20#include "edma.h"
21
22extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
23bool edma_stp_rstp;
24u16 edma_ath_eth_type;
Rakesh Nair888af952017-06-30 18:41:58 +053025extern u8 edma_dscp2ac_tbl[EDMA_PRECEDENCE_MAX];
26extern u8 edma_per_prec_stats_enable;
Rakesh Nair1c6a18c2017-08-02 21:27:06 +053027extern u32 edma_iad_stats_enable;
Rakesh Nair9bcf2602017-01-06 16:02:16 +053028
29/* edma_skb_priority_offset()
30 * get edma skb priority
31 */
32static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
33{
34 return (skb->priority >> 2) & 1;
35}
36
37/* edma_alloc_tx_ring()
38 * Allocate Tx descriptors ring
39 */
40static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
41 struct edma_tx_desc_ring *etdr)
42{
43 struct platform_device *pdev = edma_cinfo->pdev;
44 u16 sw_size = sizeof(struct edma_sw_desc) * etdr->count;
45
46 /* Initialize ring */
47 etdr->size = sizeof(struct edma_tx_desc) * etdr->count;
48 etdr->sw_next_to_fill = 0;
49 etdr->sw_next_to_clean = 0;
50
51 /* Allocate SW descriptors */
52 etdr->sw_desc = vzalloc(sw_size);
53 if (!etdr->sw_desc) {
54 dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
55 return -ENOMEM;
56 }
57
58 /* Allocate HW descriptors */
59 etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
60 GFP_KERNEL);
61 if (!etdr->hw_desc) {
62 dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
63 vfree(etdr->sw_desc);
64 etdr->sw_desc = NULL;
65 return -ENOMEM;
66 }
67
68 return 0;
69}
70
71/* edma_free_tx_ring()
72 * Free tx rings allocated by edma_alloc_tx_rings
73 */
74static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
75 struct edma_tx_desc_ring *etdr)
76{
77 struct platform_device *pdev = edma_cinfo->pdev;
78
79 if (likely(etdr->hw_desc)) {
80 dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
81 etdr->dma);
82
83 vfree(etdr->sw_desc);
84 etdr->sw_desc = NULL;
85 }
86}
87
88/* edma_alloc_rx_ring()
89 * allocate rx descriptor ring
90 */
91static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
92 struct edma_rfd_desc_ring *erxd)
93{
94 struct platform_device *pdev = edma_cinfo->pdev;
95 u16 sw_size = sizeof(struct edma_sw_desc) * erxd->count;
96
97 erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
98 erxd->sw_next_to_fill = 0;
99 erxd->sw_next_to_clean = 0;
100
101 /* Allocate SW descriptors */
102 erxd->sw_desc = vzalloc(sw_size);
103 if (!erxd->sw_desc)
104 return -ENOMEM;
105
106 /* Alloc HW descriptors */
107 erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
108 GFP_KERNEL);
109 if (!erxd->hw_desc) {
110 vfree(erxd->sw_desc);
111 erxd->sw_desc = NULL;
112 return -ENOMEM;
113 }
114
Rakesh Nair03b586c2017-04-03 18:28:58 +0530115 /* Initialize pending fill */
116 erxd->pending_fill = 0;
117
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530118 return 0;
119}
120
121/* edma_free_rx_ring()
122 * Free rx ring allocated by alloc_rx_ring
123 */
124static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
125 struct edma_rfd_desc_ring *erxd)
126{
127 struct platform_device *pdev = edma_cinfo->pdev;
128
129 if (likely(erxd->hw_desc)) {
130 dma_free_coherent(&pdev->dev, erxd->size, erxd->hw_desc,
131 erxd->dma);
132
133 vfree(erxd->sw_desc);
134 erxd->sw_desc = NULL;
135 }
136}
137
138/* edma_configure_tx()
139 * Configure transmission control data
140 */
141static void edma_configure_tx(struct edma_common_info *edma_cinfo)
142{
143 u32 txq_ctrl_data;
144
145 txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
146 txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
147 txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
148 edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
149}
150
151/* edma_configure_rx()
152 * configure reception control data
153 */
154static void edma_configure_rx(struct edma_common_info *edma_cinfo)
155{
156 struct edma_hw *hw = &edma_cinfo->hw;
157 u32 rss_type, rx_desc1, rxq_ctrl_data;
158
159 /* Set RSS type */
160 rss_type = hw->rss_type;
161 edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
162
163 /* Set RFD burst number */
164 rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
165
166 /* Set RFD prefetch threshold */
167 rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
168
169 /* Set RFD in host ring low threshold to generte interrupt */
170 rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
171 edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
172
173 /* Set Rx FIFO threshold to start to DMA data to host */
174 rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
175
176 /* Set RX remove vlan bit */
177 rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
178
179 edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
180}
181
182/* edma_alloc_rx_buf()
183 * does skb allocation for the received packets.
184 */
185static int edma_alloc_rx_buf(struct edma_common_info
186 *edma_cinfo,
187 struct edma_rfd_desc_ring *erdr,
188 int cleaned_count, int queue_id)
189{
190 struct platform_device *pdev = edma_cinfo->pdev;
191 struct edma_rx_free_desc *rx_desc;
192 struct edma_sw_desc *sw_desc;
193 struct sk_buff *skb;
194 unsigned int i;
195 u16 prod_idx, length;
196 u32 reg_data;
197
198 if (cleaned_count > erdr->count) {
199 dev_err(&pdev->dev, "Incorrect cleaned_count %d",
200 cleaned_count);
201 return -1;
202 }
203
204 i = erdr->sw_next_to_fill;
205
206 while (cleaned_count) {
207 sw_desc = &erdr->sw_desc[i];
208 length = edma_cinfo->rx_head_buffer_len;
209
210 if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
211 skb = sw_desc->skb;
212
213 /* Clear REUSE flag */
214 sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
215 } else {
216 /* alloc skb */
217 skb = netdev_alloc_skb(edma_netdev[0], length);
218 if (!skb) {
219 /* Better luck next round */
220 sw_desc->flags = 0;
221 break;
222 }
223 }
224
225 if (!edma_cinfo->page_mode) {
226 sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
227 length, DMA_FROM_DEVICE);
228 if (dma_mapping_error(&pdev->dev, sw_desc->dma)) {
229 WARN_ONCE(0, "EDMA DMA mapping failed for linear address %x", sw_desc->dma);
230 sw_desc->flags = 0;
231 sw_desc->skb = NULL;
232 dev_kfree_skb_any(skb);
233 break;
234 }
235
236 /*
237 * We should not exit from here with REUSE flag set
238 * This is to avoid re-using same sk_buff for next
239 * time around
240 */
241 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
242 sw_desc->length = length;
243 } else {
244 struct page *pg = alloc_page(GFP_ATOMIC);
245
246 if (!pg) {
247 sw_desc->flags = 0;
248 sw_desc->skb = NULL;
249 dev_kfree_skb_any(skb);
250 break;
251 }
252
253 sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
254 edma_cinfo->rx_page_buffer_len,
255 DMA_FROM_DEVICE);
256 if (dma_mapping_error(&pdev->dev, sw_desc->dma)) {
257 WARN_ONCE(0, "EDMA DMA mapping failed for page address %x", sw_desc->dma);
258 sw_desc->flags = 0;
259 sw_desc->skb = NULL;
260 __free_page(pg);
261 dev_kfree_skb_any(skb);
262 break;
263 }
264
265 skb_fill_page_desc(skb, 0, pg, 0,
266 edma_cinfo->rx_page_buffer_len);
267 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
268 sw_desc->length = edma_cinfo->rx_page_buffer_len;
269 }
270
271 /* Update the buffer info */
272 sw_desc->skb = skb;
273 rx_desc = (&(erdr->hw_desc)[i]);
274 rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
275 if (++i == erdr->count)
276 i = 0;
277 cleaned_count--;
278 }
279
280 erdr->sw_next_to_fill = i;
281
282 if (i == 0)
283 prod_idx = erdr->count - 1;
284 else
285 prod_idx = i - 1;
286
287 /* Update the producer index */
288 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
289 reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
290 reg_data |= prod_idx;
291 edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
Rakesh Nair03b586c2017-04-03 18:28:58 +0530292
293 /* If we couldn't allocate all the buffers,
294 * we increment the alloc failure counters
295 */
296 if (cleaned_count)
297 edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;
298
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530299 return cleaned_count;
300}
301
302/* edma_init_desc()
303 * update descriptor ring size, buffer and producer/consumer index
304 */
305static void edma_init_desc(struct edma_common_info *edma_cinfo)
306{
307 struct edma_rfd_desc_ring *rfd_ring;
308 struct edma_tx_desc_ring *etdr;
309 int i = 0, j = 0;
310 u32 data = 0;
311 u16 hw_cons_idx = 0;
312
313 /* Set the base address of every TPD ring. */
314 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
315 etdr = edma_cinfo->tpd_ring[i];
316
317 /* Update descriptor ring base address */
318 edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
319 edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
320
321 /* Calculate hardware consumer index */
322 hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
323 etdr->sw_next_to_fill = hw_cons_idx;
324 etdr->sw_next_to_clean = hw_cons_idx;
325 data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
326 data |= hw_cons_idx;
327
328 /* update producer index */
329 edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
330
331 /* update SW consumer index register */
332 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
333
334 /* Set TPD ring size */
335 edma_write_reg(EDMA_REG_TPD_RING_SIZE,
336 edma_cinfo->tx_ring_count &
337 EDMA_TPD_RING_SIZE_MASK);
338 }
339
340 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
341 rfd_ring = edma_cinfo->rfd_ring[j];
342 /* Update Receive Free descriptor ring base address */
343 edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
344 (u32)(rfd_ring->dma));
345 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
346 }
347
348 data = edma_cinfo->rx_head_buffer_len;
349 if (edma_cinfo->page_mode)
350 data = edma_cinfo->rx_page_buffer_len;
351
352 data &= EDMA_RX_BUF_SIZE_MASK;
353 data <<= EDMA_RX_BUF_SIZE_SHIFT;
354
355 /* Update RFD ring size and RX buffer size */
356 data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
357 << EDMA_RFD_RING_SIZE_SHIFT;
358
359 edma_write_reg(EDMA_REG_RX_DESC0, data);
360
361 /* Disable TX FIFO low watermark and high watermark */
362 edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
363
364 /* Load all of base address above */
365 edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
366 data |= 1 << EDMA_LOAD_PTR_SHIFT;
367 edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
368}
369
370/* edma_receive_checksum
371 * Api to check checksum on receive packets
372 */
373static void edma_receive_checksum(struct edma_rx_return_desc *rd,
374 struct sk_buff *skb)
375{
376 skb_checksum_none_assert(skb);
377
378 /* check the RRD IP/L4 checksum bit to see if
379 * its set, which in turn indicates checksum
380 * failure.
381 */
382 if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
383 return;
384
Rakesh Nair72e1d282017-05-19 22:21:01 +0530385 /*
386 * We disable checksum verification only if
387 * we have a TCP/UDP packet
388 */
389 if (rd->rrd7 & (EDMA_RRD_L4OFFSET_MASK << EDMA_RRD_L4OFFSET_SHIFT))
390 skb->ip_summed = CHECKSUM_UNNECESSARY;
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530391}
392
393/* edma_clean_rfd()
394 * clean up rx resourcers on error
395 */
396static void edma_clean_rfd(struct platform_device *pdev,
397 struct edma_rfd_desc_ring *erdr,
398 u16 index,
399 int pos)
400{
401 struct edma_rx_free_desc *rx_desc = &(erdr->hw_desc[index]);
402 struct edma_sw_desc *sw_desc = &erdr->sw_desc[index];
403
404 /* Unmap non-first RFD positions in packet */
405 if (pos) {
406 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
407 dma_unmap_single(&pdev->dev, sw_desc->dma,
408 sw_desc->length, DMA_FROM_DEVICE);
409 else
410 dma_unmap_page(&pdev->dev, sw_desc->dma,
411 sw_desc->length, DMA_FROM_DEVICE);
412 }
413
414 if (sw_desc->skb) {
415 dev_kfree_skb_any(sw_desc->skb);
416 sw_desc->skb = NULL;
417 }
418
419 sw_desc->flags = 0;
420 memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
421}
422
423/* edma_rx_complete_stp_rstp()
424 * Complete Rx processing for STP RSTP packets
425 */
426static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
427{
428 int i;
429 u32 priority;
430 u16 port_type;
431 u8 mac_addr[EDMA_ETH_HDR_LEN];
432
433 port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
434 & EDMA_RRD_PORT_TYPE_MASK;
435 /* if port type is 0x4, then only proceed with
436 * other stp/rstp calculation
437 */
438 if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
439 u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
440
441 /* calculate the frame priority */
442 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
443 & EDMA_RRD_PRIORITY_MASK;
444
445 for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
446 mac_addr[i] = skb->data[i];
447
448 /* Check if destination mac addr is bpdu addr */
449 if (!memcmp(mac_addr, bpdu_mac, 6)) {
450 /* destination mac address is BPDU
451 * destination mac address, then add
452 * atheros header to the packet.
453 */
454 u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
455 (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
456 (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
457 skb_push(skb, 4);
458 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
459 *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
460 *(uint16_t *)&skb->data[14] = htons(athr_hdr);
461 }
462 }
463}
464
465/* edma_rx_complete_fraglist()
466 * Complete Rx processing for fraglist skbs
467 */
468static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
469 struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
470{
471 struct platform_device *pdev = edma_cinfo->pdev;
472 struct edma_hw *hw = &edma_cinfo->hw;
473 struct sk_buff *skb_temp;
474 struct edma_sw_desc *sw_desc;
475 int i;
476 u16 size_remaining;
477
478 skb->data_len = 0;
479 skb->tail += (hw->rx_head_buff_size - 16);
480 skb->len = skb->truesize = length;
481 size_remaining = length - (hw->rx_head_buff_size - 16);
482
483 /* clean-up all related sw_descs */
484 for (i = 1; i < num_rfds; i++) {
485 struct sk_buff *skb_prev;
486
487 sw_desc = &erdr->sw_desc[sw_next_to_clean];
488 skb_temp = sw_desc->skb;
489
490 dma_unmap_single(&pdev->dev, sw_desc->dma,
491 sw_desc->length, DMA_FROM_DEVICE);
492
493 if (size_remaining < hw->rx_head_buff_size)
494 skb_put(skb_temp, size_remaining);
495 else
496 skb_put(skb_temp, hw->rx_head_buff_size);
497
498 /* If we are processing the first rfd, we link
499 * skb->frag_list to the skb corresponding to the
500 * first RFD
501 */
502 if (i == 1)
503 skb_shinfo(skb)->frag_list = skb_temp;
504 else
505 skb_prev->next = skb_temp;
506 skb_prev = skb_temp;
507 skb_temp->next = NULL;
508
509 skb->data_len += skb_temp->len;
510 size_remaining -= skb_temp->len;
511
512 /* Increment SW index */
513 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
514 }
515
516 return sw_next_to_clean;
517}
518
519/* edma_rx_complete_paged()
520 * Complete Rx processing for paged skbs
521 */
522static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds,
523 u16 length, u32 sw_next_to_clean,
524 struct edma_rfd_desc_ring *erdr,
525 struct edma_common_info *edma_cinfo)
526{
527 struct platform_device *pdev = edma_cinfo->pdev;
528 struct sk_buff *skb_temp;
529 struct edma_sw_desc *sw_desc;
530 int i;
531 u16 size_remaining;
532
533 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
534
535 /* Setup skbuff fields */
536 skb->len = length;
537
538 if (likely(num_rfds <= 1)) {
539 skb->data_len = length;
540 skb->truesize += edma_cinfo->rx_page_buffer_len;
541 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
542 16, length);
543 } else {
544 frag->size -= 16;
545 skb->data_len = frag->size;
546 skb->truesize += edma_cinfo->rx_page_buffer_len;
547 size_remaining = length - frag->size;
548
549 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
550 16, frag->size);
551
552 /* clean-up all related sw_descs */
553 for (i = 1; i < num_rfds; i++) {
554 sw_desc = &erdr->sw_desc[sw_next_to_clean];
555 skb_temp = sw_desc->skb;
556 frag = &skb_shinfo(skb_temp)->frags[0];
557 dma_unmap_page(&pdev->dev, sw_desc->dma,
558 sw_desc->length, DMA_FROM_DEVICE);
559
560 if (size_remaining < edma_cinfo->rx_page_buffer_len)
561 frag->size = size_remaining;
562
563 skb_fill_page_desc(skb, i, skb_frag_page(frag),
564 0, frag->size);
565
566 /* We used frag pages from skb_temp in skb */
567 skb_shinfo(skb_temp)->nr_frags = 0;
568 dev_kfree_skb_any(skb_temp);
569
570 skb->data_len += frag->size;
571 skb->truesize += edma_cinfo->rx_page_buffer_len;
572 size_remaining -= frag->size;
573
574 /* Increment SW index */
575 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
576 }
577 }
578
579 return sw_next_to_clean;
580}
581
582/*
583 * edma_rx_complete()
584 * Main api called from the poll function to process rx packets.
585 */
Rakesh Nair03b586c2017-04-03 18:28:58 +0530586static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530587 int *work_done, int work_to_do, int queue_id,
588 struct napi_struct *napi)
589{
590 struct platform_device *pdev = edma_cinfo->pdev;
591 struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
592 u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
593 sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
594 u32 data = 0;
595 u16 count = erdr->count, rfd_avail;
596 u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
597
Rakesh Nair03b586c2017-04-03 18:28:58 +0530598 cleaned_count = erdr->pending_fill;
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530599 sw_next_to_clean = erdr->sw_next_to_clean;
600
601 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
602 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
603 EDMA_RFD_CONS_IDX_MASK;
604
605 do {
606 while (sw_next_to_clean != hw_next_to_clean) {
607 struct net_device *netdev;
608 struct edma_adapter *adapter;
609 struct edma_sw_desc *sw_desc;
610 struct sk_buff *skb;
611 struct edma_rx_return_desc *rd;
612 u8 *vaddr;
613 int port_id, i, drop_count = 0;
614 u32 priority;
615
616 if (!work_to_do)
617 break;
618
619 sw_desc = &erdr->sw_desc[sw_next_to_clean];
620 skb = sw_desc->skb;
621
622 /* Get RRD */
623 if (!edma_cinfo->page_mode) {
624 dma_unmap_single(&pdev->dev, sw_desc->dma,
625 sw_desc->length, DMA_FROM_DEVICE);
626 rd = (struct edma_rx_return_desc *)skb->data;
627
628 } else {
629 dma_unmap_page(&pdev->dev, sw_desc->dma,
630 sw_desc->length, DMA_FROM_DEVICE);
631 vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
632 memcpy((uint8_t *)&rrd[0], vaddr, 16);
633 rd = (struct edma_rx_return_desc *)rrd;
634 kunmap_atomic(vaddr);
635 }
636
637 /* Check if RRD is valid */
638 if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
639 dev_err(&pdev->dev, "Incorrect RRD DESC valid bit set");
640 edma_clean_rfd(pdev, erdr, sw_next_to_clean, 0);
641 sw_next_to_clean = (sw_next_to_clean + 1) &
642 (erdr->count - 1);
643 cleaned_count++;
644 continue;
645 }
646
647 /* Get the number of RFDs from RRD */
648 num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
649
650 /* Get Rx port ID from switch */
651 port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
652 if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
653 if (net_ratelimit()) {
654 dev_err(&pdev->dev, "Incorrect RRD source port bit set");
655 dev_err(&pdev->dev,
656 "RRD Dump\n rrd0:%x rrd1: %x rrd2: %x rrd3: %x rrd4: %x rrd5: %x rrd6: %x rrd7: %x",
657 rd->rrd0, rd->rrd1, rd->rrd2, rd->rrd3, rd->rrd4, rd->rrd5, rd->rrd6, rd->rrd7);
658 dev_err(&pdev->dev, "Num_rfds: %d, src_port: %d, pkt_size: %d, cvlan_tag: %d\n",
659 num_rfds, rd->rrd1 & EDMA_RRD_SRC_PORT_NUM_MASK,
660 rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK, rd->rrd7 & EDMA_RRD_CVLAN);
661 }
662 for (i = 0; i < num_rfds; i++) {
663 edma_clean_rfd(pdev, erdr, sw_next_to_clean, i);
664 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
665 }
666
667 cleaned_count += num_rfds;
668 continue;
669 }
670
671 netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
672 if (!netdev) {
673 dev_err(&pdev->dev, "Invalid netdev");
674 for (i = 0; i < num_rfds; i++) {
675 edma_clean_rfd(pdev, erdr, sw_next_to_clean, i);
676 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
677 }
678
679 cleaned_count += num_rfds;
680 continue;
681 }
682 adapter = netdev_priv(netdev);
683
684 /* This code is added to handle a usecase where high
685 * priority stream and a low priority stream are
686 * received simultaneously on DUT. The problem occurs
687 * if one of the Rx rings is full and the corresponding
688 * core is busy with other stuff. This causes ESS CPU
689 * port to backpressure all incoming traffic including
690 * high priority one. We monitor free descriptor count
691 * on each CPU and whenever it reaches threshold (< 80),
692 * we drop all low priority traffic and let only high
693 * priotiy traffic pass through. We can hence avoid
694 * ESS CPU port to send backpressure on high priroity
695 * stream.
696 */
697 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
698 & EDMA_RRD_PRIORITY_MASK;
699 if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
700 rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
701 if (rfd_avail < EDMA_RFD_AVAIL_THR) {
702 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_REUSE;
703 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
704 adapter->stats.rx_dropped++;
705 cleaned_count++;
706 drop_count++;
707 if (drop_count == 3) {
708 work_to_do--;
709 (*work_done)++;
710 drop_count = 0;
711 }
712 if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
713 /* If buffer clean count reaches 16, we replenish HW buffers. */
714 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
715 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
716 sw_next_to_clean);
717 cleaned_count = ret_count;
Rakesh Nair03b586c2017-04-03 18:28:58 +0530718 erdr->pending_fill = ret_count;
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530719 }
720 continue;
721 }
722 }
723
724 work_to_do--;
725 (*work_done)++;
726
727 /* Increment SW index */
728 sw_next_to_clean = (sw_next_to_clean + 1) &
729 (erdr->count - 1);
730
731 /* Get the packet size and allocate buffer */
732 length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
733
734 if (edma_cinfo->page_mode) {
735 /* paged skb */
736 sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length,
737 sw_next_to_clean,
738 erdr, edma_cinfo);
739 if (!pskb_may_pull(skb, ETH_HLEN)) {
740 cleaned_count += num_rfds;
741 dev_kfree_skb_any(skb);
742 continue;
743 }
744 } else {
745 /* single or fraglist skb */
746
747 /* Addition of 16 bytes is required, as in the packet
748 * first 16 bytes are rrd descriptors, so actual data
749 * starts from an offset of 16.
750 */
751 skb_reserve(skb, 16);
752 if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode))
753 skb_put(skb, length);
754 else
755 sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length,
756 sw_next_to_clean,
757 erdr, edma_cinfo);
758 }
759
760 cleaned_count += num_rfds;
761
762 if (edma_stp_rstp)
763 edma_rx_complete_stp_rstp(skb, port_id, rd);
764
765 skb->protocol = eth_type_trans(skb, netdev);
766
767 /* Record Rx queue for RFS/RPS and fill flow hash from HW */
768 skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
769 if (netdev->features & NETIF_F_RXHASH) {
770 hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
771 if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
772 skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
773 }
774
775#ifdef CONFIG_NF_FLOW_COOKIE
776 skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
777#endif
778 edma_receive_checksum(rd, skb);
779
780 /* Process VLAN HW acceleration indication provided by HW */
781 if (adapter->default_vlan_tag != rd->rrd4) {
782 vlan = rd->rrd4;
783 if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
784 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
785 else if (rd->rrd1 & EDMA_RRD_SVLAN)
786 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
787 }
788
789 /* Update rx statistics */
790 adapter->stats.rx_packets++;
791 adapter->stats.rx_bytes += length;
792
793 /* Check if we reached refill threshold */
794 if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
795 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
796 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
797 sw_next_to_clean);
798 cleaned_count = ret_count;
Rakesh Nair03b586c2017-04-03 18:28:58 +0530799 erdr->pending_fill = ret_count;
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530800 }
801
Rakesh Nair888af952017-06-30 18:41:58 +0530802 /*
803 * We increment per-precedence counters for the rx packets
804 */
805 if (edma_per_prec_stats_enable) {
806 edma_cinfo->edma_ethstats.rx_prec[priority]++;
807 edma_cinfo->edma_ethstats.rx_ac[edma_dscp2ac_tbl[priority]]++;
Rakesh Nair1c6a18c2017-08-02 21:27:06 +0530808
809 if (edma_iad_stats_enable) {
810 if (edma_dscp2ac_tbl[priority] == EDMA_AC_VI)
811 edma_iad_process_flow(edma_cinfo, skb, EDMA_INGRESS_DIR, priority);
812 }
Rakesh Nair888af952017-06-30 18:41:58 +0530813 }
814
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530815 /* At this point skb should go to stack */
816 napi_gro_receive(napi, skb);
817 }
818
819 /* Check if we still have NAPI budget */
820 if (!work_to_do)
821 break;
822
823 /* Read index once again since we still have NAPI budget */
824 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
825 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
826 EDMA_RFD_CONS_IDX_MASK;
827 } while (hw_next_to_clean != sw_next_to_clean);
828
829 erdr->sw_next_to_clean = sw_next_to_clean;
830
831 /* Refill here in case refill threshold wasn't reached */
832 if (likely(cleaned_count)) {
833 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
Rakesh Nair03b586c2017-04-03 18:28:58 +0530834 erdr->pending_fill = ret_count;
835 if (ret_count) {
836 if(net_ratelimit())
837 dev_dbg(&pdev->dev, "Edma not getting memory for descriptors.\n");
838 }
839
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530840 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
841 erdr->sw_next_to_clean);
842 }
Rakesh Nair03b586c2017-04-03 18:28:58 +0530843
844 return erdr->pending_fill;
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530845}
846
847/* edma_delete_rfs_filter()
848 * Remove RFS filter from switch
849 */
850static int edma_delete_rfs_filter(struct edma_adapter *adapter,
851 struct edma_rfs_filter_node *filter_node)
852{
853 int res = -1;
854
855 if (likely(adapter->set_rfs_rule))
856 res = (*adapter->set_rfs_rule)(adapter->netdev,
857#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
858 filter_node->keys.src,
859 filter_node->keys.dst, filter_node->keys.port16[0],
860 filter_node->keys.port16[1],
861 filter_node->keys.ip_proto,
862#else
863 filter_node->keys.addrs.v4addrs.src,
864 filter_node->keys.addrs.v4addrs.dst, filter_node->keys.ports.src,
865 filter_node->keys.ports.dst,
866 filter_node->keys.basic.ip_proto,
867#endif
868 filter_node->rq_id,
869 0);
870
871 return res;
872}
873
874/* edma_add_rfs_filter()
875 * Add RFS filter to switch
876 */
877static int edma_add_rfs_filter(struct edma_adapter *adapter,
878 struct flow_keys *keys, u16 rq,
879 struct edma_rfs_filter_node *filter_node)
880{
881 int res = -1;
882
883#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
884 filter_node->keys.src = keys->src;
885 filter_node->keys.dst = keys->dst;
886 filter_node->keys.ports = keys->ports;
887 filter_node->keys.ip_proto = keys->ip_proto;
888#else
889 filter_node->keys.addrs.v4addrs.src = keys->addrs.v4addrs.src;
890 filter_node->keys.addrs.v4addrs.dst = keys->addrs.v4addrs.dst;
891 filter_node->keys.ports.ports = keys->ports.ports;
892 filter_node->keys.basic.ip_proto = keys->basic.ip_proto;
893#endif
894
895 /* Call callback registered by ESS driver */
896 if (likely(adapter->set_rfs_rule))
897#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
898 res = (*adapter->set_rfs_rule)(adapter->netdev, keys->src,
899 keys->dst, keys->port16[0], keys->port16[1],
900 keys->ip_proto, rq, 1);
901#else
902 res = (*adapter->set_rfs_rule)(adapter->netdev, keys->addrs.v4addrs.src,
903 keys->addrs.v4addrs.dst, keys->ports.src, keys->ports.dst,
904 keys->basic.ip_proto, rq, 1);
905#endif
906
907 return res;
908}
909
910/* edma_rfs_key_search()
911 * Look for existing RFS entry
912 */
913static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
914 struct flow_keys *key)
915{
916 struct edma_rfs_filter_node *p;
917
918 hlist_for_each_entry(p, h, node)
919#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
920 if (p->keys.src == key->src &&
921 p->keys.dst == key->dst &&
922 p->keys.ports == key->ports &&
923 p->keys.ip_proto == key->ip_proto)
924#else
925 if (p->keys.addrs.v4addrs.src == key->addrs.v4addrs.src &&
926 p->keys.addrs.v4addrs.dst == key->addrs.v4addrs.dst &&
927 p->keys.ports.ports == key->ports.ports &&
928 p->keys.basic.ip_proto == key->basic.ip_proto)
929#endif
930 return p;
931 return NULL;
932}
933
934/* edma_initialise_rfs_flow_table()
935 * Initialise EDMA RFS flow table
936 */
937static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
938{
939 int i;
940
941 spin_lock_init(&adapter->rfs.rfs_ftab_lock);
942
943 /* Initialize EDMA flow hash table */
944 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
945 INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
946
947 adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
948 adapter->rfs.filter_available = adapter->rfs.max_num_filter;
949 adapter->rfs.hashtoclean = 0;
950
951 /* Add timer to get periodic RFS updates from OS */
952 init_timer(&adapter->rfs.expire_rfs);
953 adapter->rfs.expire_rfs.function = edma_flow_may_expire;
954 adapter->rfs.expire_rfs.data = (unsigned long)adapter;
955 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ/4);
956}
957
958/* edma_free_rfs_flow_table()
959 * Free EDMA RFS flow table
960 */
961static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
962{
963 int i;
964
965 /* Remove sync timer */
966 del_timer_sync(&adapter->rfs.expire_rfs);
967 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
968
969 /* Free EDMA RFS table entries */
970 adapter->rfs.filter_available = 0;
971
972 /* Clean-up EDMA flow hash table */
973 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
974 struct hlist_head *hhead;
975 struct hlist_node *tmp;
976 struct edma_rfs_filter_node *filter_node;
977 int res;
978
979 hhead = &adapter->rfs.hlist_head[i];
980 hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
981 res = edma_delete_rfs_filter(adapter, filter_node);
982 if (res < 0)
983 dev_warn(&adapter->netdev->dev,
984 "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
985 filter_node->flow_id);
986 hlist_del(&filter_node->node);
987 kfree(filter_node);
988 }
989 }
990 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
991}
992
993/* edma_tx_unmap_and_free()
994 * clean TX buffer
995 */
996static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
997 struct edma_sw_desc *sw_desc)
998{
999 struct sk_buff *skb = sw_desc->skb;
1000
1001 if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
1002 (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
1003 /* unmap_single for skb head area */
1004 dma_unmap_single(&pdev->dev, sw_desc->dma,
1005 sw_desc->length, DMA_TO_DEVICE);
1006 else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
1007 /* unmap page for paged fragments */
1008 dma_unmap_page(&pdev->dev, sw_desc->dma,
1009 sw_desc->length, DMA_TO_DEVICE);
1010
1011 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
1012 dev_kfree_skb_any(skb);
1013
1014 sw_desc->flags = 0;
1015}
1016
1017/* edma_tx_complete()
1018 * Used to clean tx queues and update hardware and consumer index
1019 */
1020static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
1021{
1022 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1023 struct edma_sw_desc *sw_desc;
1024 struct platform_device *pdev = edma_cinfo->pdev;
1025 int i;
1026
1027 u16 sw_next_to_clean = etdr->sw_next_to_clean;
1028 u16 hw_next_to_clean;
1029 u32 data = 0;
1030
1031 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
1032 hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
1033
1034 /* clean the buffer here */
1035 while (sw_next_to_clean != hw_next_to_clean) {
1036 sw_desc = &etdr->sw_desc[sw_next_to_clean];
1037 edma_tx_unmap_and_free(pdev, sw_desc);
1038 sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
1039 }
1040
1041 etdr->sw_next_to_clean = sw_next_to_clean;
1042
1043 /* update the TPD consumer index register */
1044 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
1045
1046 /* Wake the queue if queue is stopped and netdev link is up */
1047 for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) {
1048 if (netif_tx_queue_stopped(etdr->nq[i])) {
1049 if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
1050 netif_tx_wake_queue(etdr->nq[i]);
1051 }
1052 }
1053}
1054
1055/* edma_get_tx_buffer()
1056 * Get sw_desc corresponding to the TPD
1057 */
1058static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
1059 struct edma_tx_desc *tpd, int queue_id)
1060{
1061 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1062
1063 return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
1064}
1065
1066/* edma_get_next_tpd()
1067 * Return a TPD descriptor for transfer
1068 */
1069static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
1070 int queue_id)
1071{
1072 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1073 u16 sw_next_to_fill = etdr->sw_next_to_fill;
1074 struct edma_tx_desc *tpd_desc =
1075 (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
1076
1077 etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
1078
1079 return tpd_desc;
1080}
1081
1082/* edma_tpd_available()
1083 * Check number of free TPDs
1084 */
1085static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
1086 int queue_id)
1087{
1088 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1089
1090 u16 sw_next_to_fill;
1091 u16 sw_next_to_clean;
1092 u16 count = 0;
1093
1094 sw_next_to_clean = etdr->sw_next_to_clean;
1095 sw_next_to_fill = etdr->sw_next_to_fill;
1096
1097 if (likely(sw_next_to_clean <= sw_next_to_fill))
1098 count = etdr->count;
1099
1100 return count + sw_next_to_clean - sw_next_to_fill - 1;
1101}
1102
1103/* edma_tx_queue_get()
1104 * Get the starting number of the queue
1105 */
1106static inline int edma_tx_queue_get(struct edma_adapter *adapter,
1107 struct sk_buff *skb, int txq_id)
1108{
1109 /* skb->priority is used as an index to skb priority table
1110 * and based on packet priority, correspong queue is assigned.
1111 */
1112 return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
1113}
1114
1115/* edma_tx_update_hw_idx()
1116 * update the producer index for the ring transmitted
1117 */
1118static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
1119 struct sk_buff *skb, int queue_id)
1120{
1121 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1122 u32 tpd_idx_data;
1123
1124 /* Read and update the producer index */
1125 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
1126 tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
1127 tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
1128 << EDMA_TPD_PROD_IDX_SHIFT;
1129
1130 edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
1131}
1132
1133/* edma_rollback_tx()
1134 * Function to retrieve tx resources in case of error
1135 */
1136static void edma_rollback_tx(struct edma_adapter *adapter,
1137 struct edma_tx_desc *start_tpd, int queue_id)
1138{
1139 struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
1140 struct edma_sw_desc *sw_desc;
1141 struct edma_tx_desc *tpd = NULL;
1142 u16 start_index, index;
1143
1144 start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
1145
1146 index = start_index;
1147 while (index != etdr->sw_next_to_fill) {
1148 tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
1149 sw_desc = &etdr->sw_desc[index];
1150 edma_tx_unmap_and_free(adapter->pdev, sw_desc);
1151 memset(tpd, 0, sizeof(struct edma_tx_desc));
1152 if (++index == etdr->count)
1153 index = 0;
1154 }
1155 etdr->sw_next_to_fill = start_index;
1156}
1157
Rakesh Nair7e053532017-08-18 17:53:25 +05301158/* edma_get_v4_precedence()
1159 * Function to retrieve precedence for IPv4
1160 */
1161static inline int edma_get_v4_precedence(struct sk_buff *skb, int nh_offset, u8 *precedence)
1162{
1163 const struct iphdr *iph;
1164 struct iphdr iph_hdr;
1165
1166 iph = skb_header_pointer(skb, nh_offset, sizeof(iph_hdr), &iph_hdr);
1167
1168 if (!iph || iph->ihl < 5)
1169 return -1;
1170
1171 *precedence = iph->tos >> EDMA_DSCP_PREC_SHIFT;
1172
1173 return 0;
1174}
1175
1176/* edma_get_v6_precedence()
1177 * Function to retrieve precedence for IPv6
1178 */
1179static inline int edma_get_v6_precedence(struct sk_buff *skb, int nh_offset, u8 *precedence)
1180{
1181 const struct ipv6hdr *iph;
1182 struct ipv6hdr iph_hdr;
1183
1184 iph = skb_header_pointer(skb, nh_offset, sizeof(iph_hdr), &iph_hdr);
1185
1186 if (!iph)
1187 return -1;
1188
1189 *precedence = iph->priority >> EDMA_DSCP6_PREC_SHIFT;
1190
1191 return 0;
1192}
1193
1194/* edma_get_skb_precedence()
1195 * Function to retrieve precedence from skb
1196 */
1197static int edma_get_skb_precedence(struct sk_buff *skb, u8 *precedence)
1198{
1199 int nhoff = skb_network_offset(skb);
1200 __be16 proto = skb->protocol;
1201 int ret;
1202 struct pppoeh_proto *pppoeh, ppp_hdr;
1203
1204 switch(proto) {
1205 case __constant_htons(ETH_P_IP): {
1206 ret = edma_get_v4_precedence(skb, nhoff, precedence);
1207 if (ret)
1208 return -1;
1209 break;
1210 }
1211 case __constant_htons(ETH_P_IPV6): {
1212 ret = edma_get_v6_precedence(skb, nhoff, precedence);
1213 if (ret)
1214 return -1;
1215 break;
1216 }
1217 case __constant_htons(ETH_P_PPP_SES): {
1218 pppoeh = skb_header_pointer(skb, nhoff, sizeof(ppp_hdr), &ppp_hdr);
1219 if (!pppoeh)
1220 return -1;
1221
1222 proto = pppoeh->proto;
1223 nhoff += PPPOE_SES_HLEN;
1224 switch (proto) {
1225 case __constant_htons(PPP_IP): {
1226 ret = edma_get_v4_precedence(skb, nhoff, precedence);
1227 if (ret)
1228 return -1;
1229 break;
1230 }
1231 case __constant_htons(PPP_IPV6): {
1232 ret = edma_get_v6_precedence(skb, nhoff, precedence);
1233 if (ret)
1234 return -1;
1235 break;
1236 }
1237 default:
1238 return -1;
1239 }
1240 break;
1241 }
1242 default:
1243 return -1;
1244 }
1245
1246 return 0;
1247}
1248
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301249/* edma_tx_map_and_fill()
1250 * gets called from edma_xmit_frame
1251 *
1252 * This is where the dma of the buffer to be transmitted
1253 * gets mapped
1254 */
1255static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
1256 struct edma_adapter *adapter,
1257 struct sk_buff *skb, int queue_id,
1258 unsigned int flags_transmit,
1259 u16 from_cpu, u16 dp_bitmap,
1260 bool packet_is_rstp, int nr_frags)
1261{
1262 struct edma_sw_desc *sw_desc = NULL;
1263 struct platform_device *pdev = edma_cinfo->pdev;
1264 struct edma_tx_desc *tpd = NULL;
1265 struct edma_tx_desc *start_tpd = NULL;
1266 struct sk_buff *iter_skb;
1267 int i;
1268 u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
1269 u16 buf_len, lso_desc_len = 0;
1270
1271 if (skb_is_gso(skb)) {
1272 /* TODO: What additional checks need to be performed here */
1273 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
1274 lso_word1 |= EDMA_TPD_IPV4_EN;
1275 ip_hdr(skb)->check = 0;
1276 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1277 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1278 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
1279 lso_word1 |= EDMA_TPD_LSO_V2_EN;
1280 ipv6_hdr(skb)->payload_len = 0;
1281 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1282 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1283 } else
1284 return -EINVAL;
1285
1286 lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
1287 (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
1288 } else if (flags_transmit & EDMA_HW_CHECKSUM) {
1289 u8 css, cso;
1290 cso = skb_checksum_start_offset(skb);
1291 css = cso + skb->csum_offset;
1292
1293 word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
1294 word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
1295 word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
1296 }
1297
1298 if (skb->protocol == htons(ETH_P_PPP_SES))
1299 word1 |= EDMA_TPD_PPPOE_EN;
1300
1301 if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
1302 switch (skb->vlan_proto) {
1303 case htons(ETH_P_8021Q):
1304 word3 |= (1 << EDMA_TX_INS_CVLAN);
1305#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1306 word3 |= vlan_tx_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1307#else
1308 word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1309#endif
1310 break;
1311 case htons(ETH_P_8021AD):
1312 word1 |= (1 << EDMA_TX_INS_SVLAN);
1313#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1314 svlan_tag = vlan_tx_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1315#else
1316 svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1317#endif
1318 break;
1319 default:
1320 dev_err(&pdev->dev, "no ctag or stag present\n");
1321 goto vlan_tag_error;
1322 }
1323 } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
1324 word3 |= (1 << EDMA_TX_INS_CVLAN);
1325 word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
1326 }
1327
1328 if (packet_is_rstp) {
1329 word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1330 word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
1331 } else {
1332 word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1333 }
1334
1335 buf_len = skb_headlen(skb);
1336
1337 if (lso_word1) {
1338 if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
1339
1340 /* IPv6 LSOv2 descriptor */
1341 start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1342 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1343 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
1344
1345 /* LSOv2 descriptor overrides addr field to pass length */
1346 tpd->addr = cpu_to_le16(skb->len);
1347 tpd->svlan_tag = svlan_tag;
1348 tpd->word1 = word1 | lso_word1;
1349 tpd->word3 = word3;
1350 }
1351
1352 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1353 if (!start_tpd)
1354 start_tpd = tpd;
1355 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1356
1357 /* The last buffer info contain the skb address,
1358 * so skb will be freed after unmap
1359 */
1360 sw_desc->length = lso_desc_len;
1361 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1362
1363 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1364 skb->data, buf_len, DMA_TO_DEVICE);
1365 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1366 goto dma_error;
1367
1368 tpd->addr = cpu_to_le32(sw_desc->dma);
1369 tpd->len = cpu_to_le16(buf_len);
1370
1371 tpd->svlan_tag = svlan_tag;
1372 tpd->word1 = word1 | lso_word1;
1373 tpd->word3 = word3;
1374
1375 /* The last buffer info contain the skb address,
1376 * so it will be freed after unmap
1377 */
1378 sw_desc->length = lso_desc_len;
1379 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1380
1381 buf_len = 0;
1382 }
1383
1384 if (likely(buf_len)) {
1385
1386 /* TODO Do not dequeue descriptor if there is a potential error */
1387 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1388
1389 if (!start_tpd)
1390 start_tpd = tpd;
1391
1392 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1393
1394 /* The last buffer info contain the skb address,
1395 * so it will be free after unmap
1396 */
1397 sw_desc->length = buf_len;
1398 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1399 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1400 skb->data, buf_len, DMA_TO_DEVICE);
1401 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1402 goto dma_error;
1403
1404 tpd->addr = cpu_to_le32(sw_desc->dma);
1405 tpd->len = cpu_to_le16(buf_len);
1406
1407 tpd->svlan_tag = svlan_tag;
1408 tpd->word1 = word1 | lso_word1;
1409 tpd->word3 = word3;
1410 }
1411
1412 i = 0;
1413
1414 /* Walk through paged frags for head skb */
1415 while (nr_frags--) {
1416 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1417 buf_len = skb_frag_size(frag);
1418 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1419 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1420 sw_desc->length = buf_len;
1421 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1422
1423 sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
1424
1425 if (dma_mapping_error(NULL, sw_desc->dma))
1426 goto dma_error;
1427
1428 tpd->addr = cpu_to_le32(sw_desc->dma);
1429 tpd->len = cpu_to_le16(buf_len);
1430
1431 tpd->svlan_tag = svlan_tag;
1432 tpd->word1 = word1 | lso_word1;
1433 tpd->word3 = word3;
1434 i++;
1435 }
1436
1437 /* Walk through all fraglist skbs */
1438 skb_walk_frags(skb, iter_skb) {
1439 buf_len = iter_skb->len;
1440 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1441 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1442 sw_desc->length = buf_len;
1443 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1444 iter_skb->data, buf_len, DMA_TO_DEVICE);
1445
1446 if (dma_mapping_error(NULL, sw_desc->dma))
1447 goto dma_error;
1448
1449 tpd->addr = cpu_to_le32(sw_desc->dma);
1450 tpd->len = cpu_to_le16(buf_len);
1451 tpd->svlan_tag = svlan_tag;
1452 tpd->word1 = word1 | lso_word1;
1453 tpd->word3 = word3;
1454 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
1455
1456 i = 0;
1457
1458 nr_frags = skb_shinfo(iter_skb)->nr_frags;
1459
1460 /* Walk through paged frags for this fraglist skb */
1461 while (nr_frags--) {
1462 skb_frag_t *frag = &skb_shinfo(iter_skb)->frags[i];
1463 buf_len = skb_frag_size(frag);
1464 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1465 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1466 sw_desc->length = buf_len;
1467 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1468
1469 sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag,
1470 0, buf_len, DMA_TO_DEVICE);
1471 if (dma_mapping_error(NULL, sw_desc->dma))
1472 goto dma_error;
1473
1474 tpd->addr = cpu_to_le32(sw_desc->dma);
1475 tpd->len = cpu_to_le16(buf_len);
1476 tpd->svlan_tag = svlan_tag;
1477 tpd->word1 = word1 | lso_word1;
1478 tpd->word3 = word3;
1479 i++;
1480 }
1481 }
1482
Rakesh Nair888af952017-06-30 18:41:58 +05301483 /* If sysctl support for per-precedence stats are enabled */
1484 if (edma_per_prec_stats_enable) {
Rakesh Nair7e053532017-08-18 17:53:25 +05301485 uint8_t precedence = 0;
Rakesh Nair888af952017-06-30 18:41:58 +05301486
Rakesh Nair7e053532017-08-18 17:53:25 +05301487 if(!edma_get_skb_precedence(skb, &precedence)) {
Rakesh Nair888af952017-06-30 18:41:58 +05301488 /* Increment per-precedence counters for tx packets
1489 * and set the precedence in the TPD.
1490 */
1491 edma_cinfo->edma_ethstats.tx_prec[precedence]++;
1492 edma_cinfo->edma_ethstats.tx_ac[edma_dscp2ac_tbl[precedence]]++;
Rakesh Nairdadf1fb2017-09-07 11:58:28 +05301493 if (tpd)
1494 tpd->word3 |= precedence << EDMA_TPD_PRIO_SHIFT;
Rakesh Nair888af952017-06-30 18:41:58 +05301495 }
Rakesh Nair1c6a18c2017-08-02 21:27:06 +05301496
1497 /* If sysctl support for IAD stats are enabled */
1498 if (edma_iad_stats_enable) {
1499 if (edma_dscp2ac_tbl[precedence] == EDMA_AC_VI)
1500 edma_iad_process_flow(edma_cinfo, skb, EDMA_EGRESS_DIR, precedence);
1501 }
Rakesh Nair888af952017-06-30 18:41:58 +05301502 }
1503
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301504 /* If tpd or sw_desc is still unitiialized then we need to return */
1505 if ((!tpd) || (!sw_desc))
1506 return -EINVAL;
1507
1508 tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
1509
1510 sw_desc->skb = skb;
1511 sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
1512
1513 return 0;
1514
1515dma_error:
1516 edma_rollback_tx(adapter, start_tpd, queue_id);
1517 dev_err(&pdev->dev, "TX DMA map failed\n");
1518vlan_tag_error:
1519 return -ENOMEM;
1520}
1521
1522/* edma_check_link()
1523 * check Link status
1524 */
1525static int edma_check_link(struct edma_adapter *adapter)
1526{
1527 struct phy_device *phydev = adapter->phydev;
1528
1529 if (!(adapter->poll_required))
1530 return __EDMA_LINKUP;
1531
1532 if (phydev->link)
1533 return __EDMA_LINKUP;
1534
1535 return __EDMA_LINKDOWN;
1536}
1537
1538/* edma_adjust_link()
1539 * check for edma link status
1540 */
1541void edma_adjust_link(struct net_device *netdev)
1542{
1543 int status;
1544 struct edma_adapter *adapter = netdev_priv(netdev);
1545 struct phy_device *phydev = adapter->phydev;
1546
1547 if (!test_bit(__EDMA_UP, &adapter->state_flags))
1548 return;
1549
1550 status = edma_check_link(adapter);
1551
1552 if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
1553 dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
1554 adapter->link_state = __EDMA_LINKUP;
1555 netif_carrier_on(netdev);
1556 if (netif_running(netdev))
1557 netif_tx_wake_all_queues(netdev);
1558 } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
1559 dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
1560 adapter->link_state = __EDMA_LINKDOWN;
1561 netif_carrier_off(netdev);
1562 netif_tx_stop_all_queues(netdev);
1563 }
1564}
1565
Bhaskar Valabojue429bab2017-03-15 09:01:23 +05301566/* edma_get_stats64()
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301567 * Statistics api used to retreive the tx/rx statistics
1568 */
Bhaskar Valabojue429bab2017-03-15 09:01:23 +05301569struct rtnl_link_stats64 *edma_get_stats64(struct net_device *netdev,
1570 struct rtnl_link_stats64 *stats)
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301571{
1572 struct edma_adapter *adapter = netdev_priv(netdev);
1573
Bhaskar Valabojue429bab2017-03-15 09:01:23 +05301574 memcpy(stats, &adapter->stats, sizeof(*stats));
1575
1576 return stats;
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301577}
1578
1579/* edma_xmit()
1580 * Main api to be called by the core for packet transmission
1581 */
1582netdev_tx_t edma_xmit(struct sk_buff *skb,
1583 struct net_device *net_dev)
1584{
1585 struct edma_adapter *adapter = netdev_priv(net_dev);
1586 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1587 struct edma_tx_desc_ring *etdr;
1588 u16 from_cpu = 0, dp_bitmap = 0, txq_id;
1589 int ret, nr_frags_first = 0, num_tpds_needed = 1, queue_id = 0;
1590 unsigned int flags_transmit = 0;
1591 bool packet_is_rstp = false;
1592 struct netdev_queue *nq = NULL;
1593
1594 if (skb_shinfo(skb)->nr_frags) {
1595 nr_frags_first = skb_shinfo(skb)->nr_frags;
1596
1597 /* It is unlikely below check hits, BUG_ON */
1598 BUG_ON(nr_frags_first > MAX_SKB_FRAGS);
1599
1600 num_tpds_needed += nr_frags_first;
1601 }
1602
1603 if (skb_has_frag_list(skb)) {
1604 struct sk_buff *iter_skb;
1605
1606 /* Walk through fraglist skbs making a note of nr_frags */
1607 skb_walk_frags(skb, iter_skb) {
1608 unsigned char nr_frags = skb_shinfo(iter_skb)->nr_frags;
1609
1610 /* It is unlikely below check hits, BUG_ON */
1611 BUG_ON(nr_frags > MAX_SKB_FRAGS);
1612
1613 /* One TPD for skb->data and more for nr_frags */
1614 num_tpds_needed += (1 + nr_frags);
1615 }
1616 }
1617
1618 if (edma_stp_rstp) {
1619 u16 ath_hdr, ath_eth_type;
1620 u8 mac_addr[EDMA_ETH_HDR_LEN];
1621 ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
1622 if (ath_eth_type == edma_ath_eth_type) {
1623 packet_is_rstp = true;
1624 ath_hdr = htons(*(uint16_t *)&skb->data[14]);
1625 dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
1626 from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
1627 memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
1628
1629 skb_pull(skb, 4);
1630
1631 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
1632 }
1633 }
1634
1635 /* this will be one of the 4 TX queues exposed to linux kernel */
1636 txq_id = skb_get_queue_mapping(skb);
1637 queue_id = edma_tx_queue_get(adapter, skb, txq_id);
1638 etdr = edma_cinfo->tpd_ring[queue_id];
1639 nq = netdev_get_tx_queue(net_dev, txq_id);
1640
1641 local_bh_disable();
1642 /* Tx is not handled in bottom half context. Hence, we need to protect
1643 * Tx from tasks and bottom half
1644 */
1645
1646 if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
1647 /* not enough descriptor, just stop queue */
1648 netif_tx_stop_queue(nq);
1649 local_bh_enable();
1650 dev_dbg(&net_dev->dev, "Not enough descriptors available");
1651 edma_cinfo->edma_ethstats.tx_desc_error++;
1652 return NETDEV_TX_BUSY;
1653 }
1654
1655 /* Check and mark VLAN tag offload */
1656#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1657 if (vlan_tx_tag_present(skb))
1658#else
1659 if (skb_vlan_tag_present(skb))
1660#endif
1661 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
1662 else if (adapter->default_vlan_tag)
1663 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
1664
1665 /* Check and mark checksum offload */
1666 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
1667 flags_transmit |= EDMA_HW_CHECKSUM;
1668
1669 /* Map and fill descriptor for Tx */
1670 ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
1671 flags_transmit, from_cpu, dp_bitmap,
1672 packet_is_rstp, nr_frags_first);
1673 if (ret) {
1674 dev_kfree_skb_any(skb);
1675 adapter->stats.tx_errors++;
1676 goto netdev_okay;
1677 }
1678
1679 /* Update SW producer index */
1680 edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
1681
1682 /* update tx statistics */
1683 adapter->stats.tx_packets++;
1684 adapter->stats.tx_bytes += skb->len;
1685
1686netdev_okay:
1687 local_bh_enable();
1688 return NETDEV_TX_OK;
1689}
1690
1691/*
1692 * edma_flow_may_expire()
1693 * Timer function called periodically to delete the node
1694 */
1695void edma_flow_may_expire(unsigned long data)
1696{
1697 struct edma_adapter *adapter = (struct edma_adapter *)data;
1698 int j;
1699
1700 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1701 for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
1702 struct hlist_head *hhead;
1703 struct hlist_node *tmp;
1704 struct edma_rfs_filter_node *n;
1705 bool res;
1706
1707 hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
1708 hlist_for_each_entry_safe(n, tmp, hhead, node) {
1709 res = rps_may_expire_flow(adapter->netdev, n->rq_id,
1710 n->flow_id, n->filter_id);
1711 if (res) {
1712 res = edma_delete_rfs_filter(adapter, n);
1713 if (res < 0)
1714 dev_dbg(&adapter->netdev->dev,
1715 "RFS entry %d not allowed to be flushed by Switch",
1716 n->flow_id);
1717 else {
1718 hlist_del(&n->node);
1719 kfree(n);
1720 adapter->rfs.filter_available++;
1721 }
1722 }
1723 }
1724 }
1725
1726 adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
1727 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1728 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ/4);
1729}
1730
1731/* edma_rx_flow_steer()
1732 * Called by core to to steer the flow to CPU
1733 */
1734int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1735 u16 rxq, u32 flow_id)
1736{
1737 struct flow_keys keys;
1738 struct edma_rfs_filter_node *filter_node;
1739 struct edma_adapter *adapter = netdev_priv(dev);
1740 u16 hash_tblid;
1741 int res;
1742
1743 if (skb->protocol == htons(ETH_P_IPV6)) {
1744 res = -EPROTONOSUPPORT;
1745 goto no_protocol_err;
1746 }
1747
1748 /* Dissect flow parameters
1749 * We only support IPv4 + TCP/UDP
1750 */
1751#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1752 res = skb_flow_dissect(skb, &keys);
1753 if (!((keys.ip_proto == IPPROTO_TCP) || (keys.ip_proto == IPPROTO_UDP))) {
1754#else
1755 res = skb_flow_dissect_flow_keys(skb, &keys, 0);
1756 if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
1757#endif
1758 res = -EPROTONOSUPPORT;
1759 goto no_protocol_err;
1760 }
1761
1762 /* Check if table entry exists */
1763 hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
1764
1765 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1766 filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
1767
1768 if (filter_node) {
1769 if (rxq == filter_node->rq_id) {
1770 res = -EEXIST;
1771 goto out;
1772 } else {
1773 res = edma_delete_rfs_filter(adapter, filter_node);
1774 if (res < 0)
1775 dev_warn(&adapter->netdev->dev,
1776 "Cannot steer flow %d to different queue",
1777 filter_node->flow_id);
1778 else {
1779 adapter->rfs.filter_available++;
1780 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1781 if (res < 0) {
1782 dev_warn(&adapter->netdev->dev,
1783 "Cannot steer flow %d to different queue",
1784 filter_node->flow_id);
1785 } else {
1786 adapter->rfs.filter_available--;
1787 filter_node->rq_id = rxq;
1788 filter_node->filter_id = res;
1789 }
1790 }
1791 }
1792 } else {
1793 if (adapter->rfs.filter_available == 0) {
1794 res = -EBUSY;
1795 goto out;
1796 }
1797
1798 filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
1799 if (!filter_node) {
1800 res = -ENOMEM;
1801 goto out;
1802 }
1803
1804 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1805 if (res < 0) {
1806 kfree(filter_node);
1807 goto out;
1808 }
1809
1810 adapter->rfs.filter_available--;
1811 filter_node->rq_id = rxq;
1812 filter_node->filter_id = res;
1813 filter_node->flow_id = flow_id;
1814 filter_node->keys = keys;
1815 INIT_HLIST_NODE(&filter_node->node);
1816 hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
1817 }
1818
1819out:
1820 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1821no_protocol_err:
1822 return res;
1823}
1824
1825#ifdef CONFIG_RFS_ACCEL
1826/* edma_register_rfs_filter()
1827 * Add RFS filter callback
1828 */
1829int edma_register_rfs_filter(struct net_device *netdev,
1830 set_rfs_filter_callback_t set_filter)
1831{
1832 struct edma_adapter *adapter = netdev_priv(netdev);
1833
1834 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1835
1836 if (adapter->set_rfs_rule) {
1837 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1838 return -1;
1839 }
1840
1841 adapter->set_rfs_rule = set_filter;
1842 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1843
1844 return 0;
1845}
1846#endif
1847
1848/* edma_select_xps_queue()
1849 * Called by Linux TX stack to populate Linux TX queue
1850 */
1851u16 edma_select_xps_queue(struct net_device *dev, struct sk_buff *skb,
1852 void *accel_priv, select_queue_fallback_t fallback)
1853{
1854#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1855 return smp_processor_id();
1856#else
1857 int cpu = get_cpu();
1858 put_cpu();
1859
1860 return cpu;
1861#endif
1862}
1863
1864/* edma_alloc_tx_rings()
1865 * Allocate rx rings
1866 */
1867int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
1868{
1869 struct platform_device *pdev = edma_cinfo->pdev;
1870 int i, err = 0;
1871
1872 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1873 err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1874 if (err) {
1875 dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
1876 return err;
1877 }
1878 }
1879
1880 return 0;
1881}
1882
1883/* edma_free_tx_rings()
1884 * Free tx rings
1885 */
1886void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
1887{
1888 int i;
1889
1890 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1891 edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1892}
1893
1894/* edma_free_tx_resources()
1895 * Free buffers associated with tx rings
1896 */
1897void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
1898{
1899 struct edma_tx_desc_ring *etdr;
1900 struct edma_sw_desc *sw_desc;
1901 struct platform_device *pdev = edma_cinfo->pdev;
1902 int i, j;
1903
1904 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1905 etdr = edma_cinfo->tpd_ring[i];
1906 for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
1907 sw_desc = &etdr->sw_desc[j];
1908 if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
1909 EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
1910 edma_tx_unmap_and_free(pdev, sw_desc);
1911 }
1912 }
1913}
1914
1915/* edma_alloc_rx_rings()
1916 * Allocate rx rings
1917 */
1918int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
1919{
1920 struct platform_device *pdev = edma_cinfo->pdev;
1921 int i, j, err = 0;
1922
1923 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1924 err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1925 if (err) {
1926 dev_err(&pdev->dev, "Rx Queue alloc%u failed\n", i);
1927 return err;
1928 }
1929 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1930 }
1931
1932 return 0;
1933}
1934
1935/* edma_free_rx_rings()
1936 * free rx rings
1937 */
1938void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
1939{
1940 int i, j;
1941
1942 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1943 edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1944 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1945 }
1946}
1947
1948/* edma_free_queues()
1949 * Free the queues allocaated
1950 */
1951void edma_free_queues(struct edma_common_info *edma_cinfo)
1952{
1953 int i , j;
1954
1955 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1956 if (edma_cinfo->tpd_ring[i])
1957 kfree(edma_cinfo->tpd_ring[i]);
1958 edma_cinfo->tpd_ring[i] = NULL;
1959 }
1960
1961 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1962 if (edma_cinfo->rfd_ring[j])
1963 kfree(edma_cinfo->rfd_ring[j]);
1964 edma_cinfo->rfd_ring[j] = NULL;
1965 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1966 }
1967
1968 edma_cinfo->num_rx_queues = 0;
1969 edma_cinfo->num_tx_queues = 0;
1970
1971 return;
1972}
1973
1974/* edma_free_rx_resources()
1975 * Free buffers associated with tx rings
1976 */
1977void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
1978{
1979 struct edma_rfd_desc_ring *erdr;
1980 struct platform_device *pdev = edma_cinfo->pdev;
1981 int i, j, k;
1982
1983 for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
1984 erdr = edma_cinfo->rfd_ring[k];
1985 for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
1986 /* unmap all descriptors while cleaning */
1987 edma_clean_rfd(pdev, erdr, j, 1);
1988 }
1989 k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1990
1991 }
1992}
1993
1994/* edma_alloc_queues_tx()
1995 * Allocate memory for all rings
1996 */
1997int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
1998{
1999 int i;
2000
2001 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
2002 struct edma_tx_desc_ring *etdr;
2003 etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
2004 if (!etdr)
2005 goto err;
2006 etdr->count = edma_cinfo->tx_ring_count;
2007 edma_cinfo->tpd_ring[i] = etdr;
2008 }
2009
2010 return 0;
2011err:
2012 edma_free_queues(edma_cinfo);
2013 return -1;
2014}
2015
2016/* edma_alloc_queues_rx()
2017 * Allocate memory for all rings
2018 */
2019int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
2020{
2021 int i, j;
2022
2023 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
2024 struct edma_rfd_desc_ring *rfd_ring;
2025 rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
2026 GFP_KERNEL);
2027 if (!rfd_ring)
2028 goto err;
2029 rfd_ring->count = edma_cinfo->rx_ring_count;
2030 edma_cinfo->rfd_ring[j] = rfd_ring;
2031 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2032 }
2033 return 0;
2034err:
2035 edma_free_queues(edma_cinfo);
2036 return -1;
2037}
2038
2039/* edma_clear_irq_status()
2040 * Clear interrupt status
2041 */
2042void edma_clear_irq_status(void)
2043{
2044 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
2045 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
2046 edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
2047 edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
2048};
2049
2050/* edma_configure()
2051 * Configure skb, edma interrupts and control register.
2052 */
2053int edma_configure(struct edma_common_info *edma_cinfo)
2054{
2055 struct edma_hw *hw = &edma_cinfo->hw;
2056 u32 intr_modrt_data;
2057 u32 intr_ctrl_data = 0;
2058 int i, j, ret_count;
2059
2060 edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
2061 intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
2062 intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
2063 edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
2064
2065 edma_clear_irq_status();
2066
2067 /* Clear any WOL status */
2068 edma_write_reg(EDMA_REG_WOL_CTRL, 0);
2069 intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
2070 intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
2071 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
2072 edma_configure_tx(edma_cinfo);
2073 edma_configure_rx(edma_cinfo);
2074
2075 /* Allocate the RX buffer */
2076 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
2077 struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
2078 ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
2079 if (ret_count)
2080 dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
Rakesh Nair03b586c2017-04-03 18:28:58 +05302081 ring->pending_fill = ret_count;
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302082 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2083 }
2084
2085 /* Configure descriptor Ring */
2086 edma_init_desc(edma_cinfo);
2087 return 0;
2088}
2089
2090/* edma_irq_enable()
2091 * Enable default interrupt generation settings
2092 */
2093void edma_irq_enable(struct edma_common_info *edma_cinfo)
2094{
2095 struct edma_hw *hw = &edma_cinfo->hw;
2096 int i, j;
2097
2098 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
2099 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
2100 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
2101 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2102 }
2103 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
2104 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
2105 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
2106}
2107
2108/* edma_irq_disable()
2109 * Disable Interrupt
2110 */
2111void edma_irq_disable(struct edma_common_info *edma_cinfo)
2112{
2113 int i;
2114
2115 for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
2116 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);
2117
2118 for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
2119 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
2120 edma_write_reg(EDMA_REG_MISC_IMR, 0);
2121 edma_write_reg(EDMA_REG_WOL_IMR, 0);
2122}
2123
2124/* edma_free_irqs()
2125 * Free All IRQs
2126 */
2127void edma_free_irqs(struct edma_adapter *adapter)
2128{
2129 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
2130 int i, j;
2131 int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);
2132
2133 for (i = 0; i < CONFIG_NR_CPUS; i++) {
2134 for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
2135 free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);
2136
2137 for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
2138 free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
2139 }
2140}
2141
2142/* edma_enable_rx_ctrl()
2143 * Enable RX queue control
2144 */
2145void edma_enable_rx_ctrl(struct edma_hw *hw)
2146{
2147 u32 data;
2148
2149 edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
2150 data |= EDMA_RXQ_CTRL_EN;
2151 edma_write_reg(EDMA_REG_RXQ_CTRL, data);
2152}
2153
2154
2155/* edma_enable_tx_ctrl()
2156 * Enable TX queue control
2157 */
2158void edma_enable_tx_ctrl(struct edma_hw *hw)
2159{
2160 u32 data;
2161
2162 edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
2163 data |= EDMA_TXQ_CTRL_TXQ_EN;
2164 edma_write_reg(EDMA_REG_TXQ_CTRL, data);
2165}
2166
2167/* edma_stop_rx_tx()
2168 * Disable RX/TQ Queue control
2169 */
2170void edma_stop_rx_tx(struct edma_hw *hw)
2171{
2172 u32 data;
2173
2174 edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
2175 data &= ~EDMA_RXQ_CTRL_EN;
2176 edma_write_reg(EDMA_REG_RXQ_CTRL, data);
2177 edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
2178 data &= ~EDMA_TXQ_CTRL_TXQ_EN;
2179 edma_write_reg(EDMA_REG_TXQ_CTRL, data);
2180}
2181
2182/* edma_reset()
2183 * Reset the EDMA
2184 */
2185int edma_reset(struct edma_common_info *edma_cinfo)
2186{
2187 struct edma_hw *hw = &edma_cinfo->hw;
2188
2189 edma_irq_disable(edma_cinfo);
2190
2191 edma_clear_irq_status();
2192
2193 edma_stop_rx_tx(hw);
2194
2195 return 0;
2196}
2197
2198/* edma_fill_netdev()
2199 * Fill netdev for each etdr
2200 */
2201int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
2202 int dev, int txq_id)
2203{
2204 struct edma_tx_desc_ring *etdr;
2205 int i = 0;
2206
2207 etdr = edma_cinfo->tpd_ring[queue_id];
2208
2209 while (etdr->netdev[i])
2210 i++;
2211
2212 if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
2213 return -1;
2214
2215 /* Populate the netdev associated with the tpd ring */
2216 etdr->netdev[i] = edma_netdev[dev];
2217 etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);
2218
2219 return 0;
2220}
2221
2222/* edma_change_mtu()
2223 * change the MTU of the NIC.
2224 */
2225int edma_change_mtu(struct net_device *netdev, int new_mtu)
2226{
2227 struct edma_adapter *adapter = netdev_priv(netdev);
2228 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
2229 int old_mtu = netdev->mtu;
2230 int max_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + (2 * VLAN_HLEN);
2231
2232 if ((max_frame_size < ETH_ZLEN + ETH_FCS_LEN) ||
2233 (max_frame_size > EDMA_MAX_JUMBO_FRAME_SIZE)) {
2234 dev_err(&edma_cinfo->pdev->dev, "MTU setting not correct\n");
2235 return -EINVAL;
2236 }
2237
2238 /* set MTU */
2239 if (old_mtu != new_mtu) {
2240 netdev->mtu = new_mtu;
2241 netdev_update_features(netdev);
2242 }
2243
2244 return 0;
2245}
2246
2247/* edma_set_mac()
2248 * Change the Ethernet Address of the NIC
2249 */
2250int edma_set_mac_addr(struct net_device *netdev, void *p)
2251{
2252 struct sockaddr *addr = p;
2253
2254 if (!is_valid_ether_addr(addr->sa_data))
2255 return -EINVAL;
2256
2257 if (netif_running(netdev))
2258 return -EBUSY;
2259
2260 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2261 return 0;
2262}
2263
2264/* edma_set_stp_rstp()
2265 * set stp/rstp
2266 */
2267void edma_set_stp_rstp(bool rstp)
2268{
2269 edma_stp_rstp = rstp;
2270}
2271
2272/* edma_assign_ath_hdr_type()
2273 * assign atheros header eth type
2274 */
2275void edma_assign_ath_hdr_type(int eth_type)
2276{
2277 edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
2278}
2279
2280/* edma_get_default_vlan_tag()
2281 * Used by other modules to get the default vlan tag
2282 */
2283int edma_get_default_vlan_tag(struct net_device *netdev)
2284{
2285 struct edma_adapter *adapter = netdev_priv(netdev);
2286
2287 if (adapter->default_vlan_tag)
2288 return adapter->default_vlan_tag;
2289
2290 return 0;
2291}
2292
2293/* edma_open()
2294 * gets called when netdevice is up, start the queue.
2295 */
2296int edma_open(struct net_device *netdev)
2297{
2298 struct edma_adapter *adapter = netdev_priv(netdev);
2299 struct platform_device *pdev = adapter->edma_cinfo->pdev;
2300
2301 netif_tx_start_all_queues(netdev);
2302 edma_initialise_rfs_flow_table(adapter);
2303 set_bit(__EDMA_UP, &adapter->state_flags);
2304
2305 /* if Link polling is enabled, in our case enabled for WAN, then
2306 * do a phy start, else always set link as UP
2307 */
Rakesh Naired29f6b2017-04-04 15:48:08 +05302308 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302309 if (adapter->poll_required) {
2310 if (!IS_ERR(adapter->phydev)) {
2311 phy_start(adapter->phydev);
2312 phy_start_aneg(adapter->phydev);
2313 adapter->link_state = __EDMA_LINKDOWN;
2314 } else {
2315 dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
2316 }
2317 } else {
2318 adapter->link_state = __EDMA_LINKUP;
2319 netif_carrier_on(netdev);
2320 }
Rakesh Naired29f6b2017-04-04 15:48:08 +05302321 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302322
2323 return 0;
2324}
2325
2326
2327/* edma_close()
2328 * gets called when netdevice is down, stops the queue.
2329 */
2330int edma_close(struct net_device *netdev)
2331{
2332 struct edma_adapter *adapter = netdev_priv(netdev);
2333
2334 edma_free_rfs_flow_table(adapter);
2335 netif_carrier_off(netdev);
2336 netif_tx_stop_all_queues(netdev);
2337
Rakesh Naired29f6b2017-04-04 15:48:08 +05302338 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302339 if (adapter->poll_required) {
2340 if (!IS_ERR(adapter->phydev))
2341 phy_stop(adapter->phydev);
2342 }
Rakesh Naired29f6b2017-04-04 15:48:08 +05302343 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302344
2345 adapter->link_state = __EDMA_LINKDOWN;
2346
2347 /* Set GMAC state to UP before link state is checked
2348 */
2349 clear_bit(__EDMA_UP, &adapter->state_flags);
2350
2351 return 0;
2352}
2353
2354/* edma_poll
2355 * polling function that gets called when the napi gets scheduled.
2356 *
2357 * Main sequence of task performed in this api
2358 * is clear irq status -> clear_tx_irq -> clean_rx_irq->
2359 * enable interrupts.
2360 */
2361int edma_poll(struct napi_struct *napi, int budget)
2362{
2363 struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
2364 struct edma_per_cpu_queues_info, napi);
2365 struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2366 u32 reg_data;
2367 u32 shadow_rx_status, shadow_tx_status;
2368 int queue_id;
2369 int i, work_done = 0;
Rakesh Nair03b586c2017-04-03 18:28:58 +05302370 u16 rx_pending_fill;
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302371
2372 /* Store the Rx/Tx status by ANDing it with
2373 * appropriate CPU RX?TX mask
2374 */
2375 edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
2376 edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
2377 shadow_rx_status = edma_percpu_info->rx_status;
2378 edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
2379 edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
2380 shadow_tx_status = edma_percpu_info->tx_status;
2381
2382 /* Every core will have a start, which will be computed
2383 * in probe and stored in edma_percpu_info->tx_start variable.
2384 * We will shift the status bit by tx_start to obtain
2385 * status bits for the core on which the current processing
2386 * is happening. Since, there are 4 tx queues per core,
2387 * we will run the loop till we get the correct queue to clear.
2388 */
2389 while (edma_percpu_info->tx_status) {
2390 queue_id = ffs(edma_percpu_info->tx_status) - 1;
2391 edma_tx_complete(edma_cinfo, queue_id);
2392 edma_percpu_info->tx_status &= ~(1 << queue_id);
2393 }
2394
2395 /* Every core will have a start, which will be computed
2396 * in probe and stored in edma_percpu_info->tx_start variable.
2397 * We will shift the status bit by tx_start to obtain
2398 * status bits for the core on which the current processing
2399 * is happening. Since, there are 4 tx queues per core, we
2400 * will run the loop till we get the correct queue to clear.
2401 */
2402 while (edma_percpu_info->rx_status) {
2403 queue_id = ffs(edma_percpu_info->rx_status) - 1;
Rakesh Nair03b586c2017-04-03 18:28:58 +05302404 rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302405 budget, queue_id, napi);
2406
Rakesh Nair03b586c2017-04-03 18:28:58 +05302407 if (likely(work_done < budget)) {
2408 if (rx_pending_fill) {
2409 work_done = budget;
2410 break;
2411 }
2412
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302413 edma_percpu_info->rx_status &= ~(1 << queue_id);
Rakesh Nair03b586c2017-04-03 18:28:58 +05302414 }
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302415 else
2416 break;
2417 }
2418
2419 /* Clear the status register, to avoid the interrupts to
2420 * reoccur.This clearing of interrupt status register is
2421 * done here as writing to status register only takes place
2422 * once the producer/consumer index has been updated to
2423 * reflect that the packet transmission/reception went fine.
2424 */
2425 edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
2426 edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
2427
2428 /* If budget not fully consumed, exit the polling mode */
2429 if (likely(work_done < budget)) {
2430 napi_complete(napi);
2431
2432 /* re-enable the interrupts */
2433 for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2434 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
2435 for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2436 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
2437 }
2438
2439 return work_done;
2440}
2441
2442/* edma interrupt()
2443 * interrupt handler
2444 */
2445irqreturn_t edma_interrupt(int irq, void *dev)
2446{
2447 struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
2448 struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2449 int i;
2450
2451 /* Unmask the TX/RX interrupt register */
2452 for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2453 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
2454
2455 for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2456 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);
2457
2458 napi_schedule(&edma_percpu_info->napi);
2459
2460 return IRQ_HANDLED;
2461}