/*
 * Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include "ess_edma.h"
#include "edma.h"

extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
bool edma_stp_rstp;
u16 edma_ath_eth_type;
extern u8 edma_dscp2ac_tbl[EDMA_PRECEDENCE_MAX];
extern u8 edma_per_prec_stats_enable;
extern u32 edma_iad_stats_enable;

/* edma_skb_priority_offset()
 *	get edma skb priority
 */
static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
{
	return (skb->priority >> 2) & 1;
}
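/* Editorial illustration (not part of the original driver comments):
 * (skb->priority >> 2) & 1 maps priorities 0-3 to offset 0 and 4-7 to
 * offset 1, with the pattern repeating for higher values, so each port
 * effectively gets two priority levels on top of its Tx queue base.
 * The "two levels per port" reading is inferred from how this offset is
 * added to tx_start_offset[] in edma_tx_queue_get() below.
 */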
36
37/* edma_alloc_tx_ring()
38 * Allocate Tx descriptors ring
39 */
40static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
41 struct edma_tx_desc_ring *etdr)
42{
43 struct platform_device *pdev = edma_cinfo->pdev;
44 u16 sw_size = sizeof(struct edma_sw_desc) * etdr->count;
45
46 /* Initialize ring */
47 etdr->size = sizeof(struct edma_tx_desc) * etdr->count;
48 etdr->sw_next_to_fill = 0;
49 etdr->sw_next_to_clean = 0;
50
51 /* Allocate SW descriptors */
52 etdr->sw_desc = vzalloc(sw_size);
53 if (!etdr->sw_desc) {
54 dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
55 return -ENOMEM;
56 }
57
58 /* Allocate HW descriptors */
59 etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
60 GFP_KERNEL);
61 if (!etdr->hw_desc) {
62 dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
63 vfree(etdr->sw_desc);
64 etdr->sw_desc = NULL;
65 return -ENOMEM;
66 }
67
68 return 0;
69}
70
/* edma_free_tx_ring()
 *	Free a Tx ring allocated by edma_alloc_tx_ring()
 */
74static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
75 struct edma_tx_desc_ring *etdr)
76{
77 struct platform_device *pdev = edma_cinfo->pdev;
78
79 if (likely(etdr->hw_desc)) {
80 dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
81 etdr->dma);
82
83 vfree(etdr->sw_desc);
84 etdr->sw_desc = NULL;
85 }
86}
87
88/* edma_alloc_rx_ring()
89 * allocate rx descriptor ring
90 */
91static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
92 struct edma_rfd_desc_ring *erxd)
93{
94 struct platform_device *pdev = edma_cinfo->pdev;
95 u16 sw_size = sizeof(struct edma_sw_desc) * erxd->count;
96
97 erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
98 erxd->sw_next_to_fill = 0;
99 erxd->sw_next_to_clean = 0;
100
101 /* Allocate SW descriptors */
102 erxd->sw_desc = vzalloc(sw_size);
103 if (!erxd->sw_desc)
104 return -ENOMEM;
105
106 /* Alloc HW descriptors */
107 erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
108 GFP_KERNEL);
109 if (!erxd->hw_desc) {
110 vfree(erxd->sw_desc);
111 erxd->sw_desc = NULL;
112 return -ENOMEM;
113 }
114
	/* Initialize pending fill */
	erxd->pending_fill = 0;

	return 0;
119}
120
/* edma_free_rx_ring()
 *	Free an Rx ring allocated by edma_alloc_rx_ring()
 */
124static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
125 struct edma_rfd_desc_ring *erxd)
126{
127 struct platform_device *pdev = edma_cinfo->pdev;
128
129 if (likely(erxd->hw_desc)) {
130 dma_free_coherent(&pdev->dev, erxd->size, erxd->hw_desc,
131 erxd->dma);
132
133 vfree(erxd->sw_desc);
134 erxd->sw_desc = NULL;
135 }
136}
137
138/* edma_configure_tx()
139 * Configure transmission control data
140 */
141static void edma_configure_tx(struct edma_common_info *edma_cinfo)
142{
143 u32 txq_ctrl_data;
144
145 txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
146 txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
147 txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
148 edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
149}
150
151/* edma_configure_rx()
152 * configure reception control data
153 */
154static void edma_configure_rx(struct edma_common_info *edma_cinfo)
155{
156 struct edma_hw *hw = &edma_cinfo->hw;
157 u32 rss_type, rx_desc1, rxq_ctrl_data;
158
159 /* Set RSS type */
160 rss_type = hw->rss_type;
161 edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
162
163 /* Set RFD burst number */
164 rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
165
166 /* Set RFD prefetch threshold */
167 rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
168
	/* Set RFD in host ring low threshold to generate interrupt */
170 rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
171 edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
172
173 /* Set Rx FIFO threshold to start to DMA data to host */
174 rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
175
176 /* Set RX remove vlan bit */
177 rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
178
179 edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
180}
181
182/* edma_alloc_rx_buf()
183 * does skb allocation for the received packets.
184 */
185static int edma_alloc_rx_buf(struct edma_common_info
186 *edma_cinfo,
187 struct edma_rfd_desc_ring *erdr,
188 int cleaned_count, int queue_id)
189{
190 struct platform_device *pdev = edma_cinfo->pdev;
191 struct edma_rx_free_desc *rx_desc;
192 struct edma_sw_desc *sw_desc;
193 struct sk_buff *skb;
194 unsigned int i;
195 u16 prod_idx, length;
196 u32 reg_data;
197
198 if (cleaned_count > erdr->count) {
199 dev_err(&pdev->dev, "Incorrect cleaned_count %d",
200 cleaned_count);
201 return -1;
202 }
203
204 i = erdr->sw_next_to_fill;
205
206 while (cleaned_count) {
207 sw_desc = &erdr->sw_desc[i];
208 length = edma_cinfo->rx_head_buffer_len;
209
210 if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
211 skb = sw_desc->skb;
212
213 /* Clear REUSE flag */
214 sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
215 } else {
216 /* alloc skb */
217 skb = netdev_alloc_skb(edma_netdev[0], length);
218 if (!skb) {
219 /* Better luck next round */
220 sw_desc->flags = 0;
221 break;
222 }
223 }
224
225 if (!edma_cinfo->page_mode) {
226 sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
227 length, DMA_FROM_DEVICE);
228 if (dma_mapping_error(&pdev->dev, sw_desc->dma)) {
229 WARN_ONCE(0, "EDMA DMA mapping failed for linear address %x", sw_desc->dma);
230 sw_desc->flags = 0;
231 sw_desc->skb = NULL;
232 dev_kfree_skb_any(skb);
233 break;
234 }
235
236 /*
237 * We should not exit from here with REUSE flag set
238 * This is to avoid re-using same sk_buff for next
239 * time around
240 */
241 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
242 sw_desc->length = length;
243 } else {
244 struct page *pg = alloc_page(GFP_ATOMIC);
245
246 if (!pg) {
247 sw_desc->flags = 0;
248 sw_desc->skb = NULL;
249 dev_kfree_skb_any(skb);
250 break;
251 }
252
253 sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
254 edma_cinfo->rx_page_buffer_len,
255 DMA_FROM_DEVICE);
256 if (dma_mapping_error(&pdev->dev, sw_desc->dma)) {
257 WARN_ONCE(0, "EDMA DMA mapping failed for page address %x", sw_desc->dma);
258 sw_desc->flags = 0;
259 sw_desc->skb = NULL;
260 __free_page(pg);
261 dev_kfree_skb_any(skb);
262 break;
263 }
264
265 skb_fill_page_desc(skb, 0, pg, 0,
266 edma_cinfo->rx_page_buffer_len);
267 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
268 sw_desc->length = edma_cinfo->rx_page_buffer_len;
269 }
270
271 /* Update the buffer info */
272 sw_desc->skb = skb;
273 rx_desc = (&(erdr->hw_desc)[i]);
274 rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
275 if (++i == erdr->count)
276 i = 0;
277 cleaned_count--;
278 }
279
280 erdr->sw_next_to_fill = i;
281
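	/* Editorial note: the producer index written to hardware must point
	 * at the last descriptor actually filled, i.e. one slot behind
	 * sw_next_to_fill, hence the wrap to erdr->count - 1 when i is 0.
	 * This reading is inferred from the arithmetic below.
	 */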
282 if (i == 0)
283 prod_idx = erdr->count - 1;
284 else
285 prod_idx = i - 1;
286
287 /* Update the producer index */
288 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
289 reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
290 reg_data |= prod_idx;
	edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);

	/* If we couldn't allocate all the buffers,
	 * we increment the alloc failure counters
	 */
	if (cleaned_count)
		edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;

	return cleaned_count;
300}
301
302/* edma_init_desc()
303 * update descriptor ring size, buffer and producer/consumer index
304 */
305static void edma_init_desc(struct edma_common_info *edma_cinfo)
306{
307 struct edma_rfd_desc_ring *rfd_ring;
308 struct edma_tx_desc_ring *etdr;
309 int i = 0, j = 0;
310 u32 data = 0;
311 u16 hw_cons_idx = 0;
312
313 /* Set the base address of every TPD ring. */
314 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
315 etdr = edma_cinfo->tpd_ring[i];
316
317 /* Update descriptor ring base address */
318 edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
319 edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
320
321 /* Calculate hardware consumer index */
322 hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
323 etdr->sw_next_to_fill = hw_cons_idx;
324 etdr->sw_next_to_clean = hw_cons_idx;
325 data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
326 data |= hw_cons_idx;
327
328 /* update producer index */
329 edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
330
331 /* update SW consumer index register */
332 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
333
334 /* Set TPD ring size */
335 edma_write_reg(EDMA_REG_TPD_RING_SIZE,
336 edma_cinfo->tx_ring_count &
337 EDMA_TPD_RING_SIZE_MASK);
338 }
339
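	/* Editorial note, inferred from the stride below: with four Rx
	 * queues only every other RFD ring register (queues 0, 2, 4, 6) is
	 * programmed; otherwise rings map 1:1 onto consecutive registers.
	 */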
340 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
341 rfd_ring = edma_cinfo->rfd_ring[j];
342 /* Update Receive Free descriptor ring base address */
343 edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
344 (u32)(rfd_ring->dma));
345 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
346 }
347
348 data = edma_cinfo->rx_head_buffer_len;
349 if (edma_cinfo->page_mode)
350 data = edma_cinfo->rx_page_buffer_len;
351
352 data &= EDMA_RX_BUF_SIZE_MASK;
353 data <<= EDMA_RX_BUF_SIZE_SHIFT;
354
355 /* Update RFD ring size and RX buffer size */
356 data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
357 << EDMA_RFD_RING_SIZE_SHIFT;
358
359 edma_write_reg(EDMA_REG_RX_DESC0, data);
360
361 /* Disable TX FIFO low watermark and high watermark */
362 edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
363
364 /* Load all of base address above */
365 edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
366 data |= 1 << EDMA_LOAD_PTR_SHIFT;
367 edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
368}
369
/* edma_receive_checksum
 *	API to check the checksum of received packets
 */
373static void edma_receive_checksum(struct edma_rx_return_desc *rd,
374 struct sk_buff *skb)
375{
376 skb_checksum_none_assert(skb);
377
	/* Check the RRD IP/L4 checksum bit to see if
	 * it's set, which in turn indicates checksum
	 * failure.
	 */
382 if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
383 return;
384
	/*
	 * We mark the checksum as already verified (CHECKSUM_UNNECESSARY)
	 * only if this is a TCP/UDP packet, i.e. the RRD carries a valid
	 * L4 offset
	 */
	if (rd->rrd7 & (EDMA_RRD_L4OFFSET_MASK << EDMA_RRD_L4OFFSET_SHIFT))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
392
/* edma_clean_rfd()
 *	clean up rx resources on error
 */
396static void edma_clean_rfd(struct platform_device *pdev,
397 struct edma_rfd_desc_ring *erdr,
398 u16 index,
399 int pos)
400{
401 struct edma_rx_free_desc *rx_desc = &(erdr->hw_desc[index]);
402 struct edma_sw_desc *sw_desc = &erdr->sw_desc[index];
403
404 /* Unmap non-first RFD positions in packet */
405 if (pos) {
406 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
407 dma_unmap_single(&pdev->dev, sw_desc->dma,
408 sw_desc->length, DMA_FROM_DEVICE);
409 else
410 dma_unmap_page(&pdev->dev, sw_desc->dma,
411 sw_desc->length, DMA_FROM_DEVICE);
412 }
413
414 if (sw_desc->skb) {
415 dev_kfree_skb_any(sw_desc->skb);
416 sw_desc->skb = NULL;
417 }
418
419 sw_desc->flags = 0;
420 memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
421}
422
423/* edma_rx_complete_stp_rstp()
424 * Complete Rx processing for STP RSTP packets
425 */
426static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
427{
428 int i;
429 u32 priority;
430 u16 port_type;
431 u8 mac_addr[EDMA_ETH_HDR_LEN];
432
433 port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
434 & EDMA_RRD_PORT_TYPE_MASK;
	/* Proceed with the STP/RSTP handling below only if
	 * the port type is 0x4 (RSTP port type)
	 */
438 if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
439 u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
440
441 /* calculate the frame priority */
442 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
443 & EDMA_RRD_PRIORITY_MASK;
444
445 for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
446 mac_addr[i] = skb->data[i];
447
		/* Check if destination mac addr is bpdu addr */
		if (!memcmp(mac_addr, bpdu_mac, 6)) {
			/* The destination MAC address is the BPDU
			 * address, so prepend the Atheros header
			 * to the packet.
			 */
454 u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
455 (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
456 (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
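			/* Resulting frame layout (derived from the stores
			 * below): after the 4-byte skb_push() the saved
			 * Ethernet header is copied back to the front,
			 * bytes 12-13 carry edma_ath_eth_type and bytes
			 * 14-15 the athr_hdr word built above.
			 */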
457 skb_push(skb, 4);
458 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
459 *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
460 *(uint16_t *)&skb->data[14] = htons(athr_hdr);
461 }
462 }
463}
464
465/* edma_rx_complete_fraglist()
466 * Complete Rx processing for fraglist skbs
467 */
468static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
469 struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
470{
471 struct platform_device *pdev = edma_cinfo->pdev;
472 struct edma_hw *hw = &edma_cinfo->hw;
473 struct sk_buff *skb_temp;
474 struct edma_sw_desc *sw_desc;
475 int i;
476 u16 size_remaining;
477
478 skb->data_len = 0;
479 skb->tail += (hw->rx_head_buff_size - 16);
480 skb->len = skb->truesize = length;
481 size_remaining = length - (hw->rx_head_buff_size - 16);
482
483 /* clean-up all related sw_descs */
484 for (i = 1; i < num_rfds; i++) {
485 struct sk_buff *skb_prev;
486
487 sw_desc = &erdr->sw_desc[sw_next_to_clean];
488 skb_temp = sw_desc->skb;
489
490 dma_unmap_single(&pdev->dev, sw_desc->dma,
491 sw_desc->length, DMA_FROM_DEVICE);
492
493 if (size_remaining < hw->rx_head_buff_size)
494 skb_put(skb_temp, size_remaining);
495 else
496 skb_put(skb_temp, hw->rx_head_buff_size);
497
		/* For the first trailing RFD we link skb_temp into
		 * skb->frag_list; subsequent buffers are chained via
		 * skb_prev->next
		 */
502 if (i == 1)
503 skb_shinfo(skb)->frag_list = skb_temp;
504 else
505 skb_prev->next = skb_temp;
506 skb_prev = skb_temp;
507 skb_temp->next = NULL;
508
509 skb->data_len += skb_temp->len;
510 size_remaining -= skb_temp->len;
511
512 /* Increment SW index */
513 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
514 }
515
516 return sw_next_to_clean;
517}
518
519/* edma_rx_complete_paged()
520 * Complete Rx processing for paged skbs
521 */
522static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds,
523 u16 length, u32 sw_next_to_clean,
524 struct edma_rfd_desc_ring *erdr,
525 struct edma_common_info *edma_cinfo)
526{
527 struct platform_device *pdev = edma_cinfo->pdev;
528 struct sk_buff *skb_temp;
529 struct edma_sw_desc *sw_desc;
530 int i;
531 u16 size_remaining;
532
533 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
534
535 /* Setup skbuff fields */
536 skb->len = length;
537
538 if (likely(num_rfds <= 1)) {
539 skb->data_len = length;
540 skb->truesize += edma_cinfo->rx_page_buffer_len;
541 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
542 16, length);
543 } else {
544 frag->size -= 16;
545 skb->data_len = frag->size;
546 skb->truesize += edma_cinfo->rx_page_buffer_len;
547 size_remaining = length - frag->size;
548
549 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
550 16, frag->size);
551
552 /* clean-up all related sw_descs */
553 for (i = 1; i < num_rfds; i++) {
554 sw_desc = &erdr->sw_desc[sw_next_to_clean];
555 skb_temp = sw_desc->skb;
556 frag = &skb_shinfo(skb_temp)->frags[0];
557 dma_unmap_page(&pdev->dev, sw_desc->dma,
558 sw_desc->length, DMA_FROM_DEVICE);
559
560 if (size_remaining < edma_cinfo->rx_page_buffer_len)
561 frag->size = size_remaining;
562
563 skb_fill_page_desc(skb, i, skb_frag_page(frag),
564 0, frag->size);
565
566 /* We used frag pages from skb_temp in skb */
567 skb_shinfo(skb_temp)->nr_frags = 0;
568 dev_kfree_skb_any(skb_temp);
569
570 skb->data_len += frag->size;
571 skb->truesize += edma_cinfo->rx_page_buffer_len;
572 size_remaining -= frag->size;
573
574 /* Increment SW index */
575 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
576 }
577 }
578
579 return sw_next_to_clean;
580}
581
582/*
583 * edma_rx_complete()
584 * Main api called from the poll function to process rx packets.
585 */
static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
			    int *work_done, int work_to_do, int queue_id,
			    struct napi_struct *napi)
589{
590 struct platform_device *pdev = edma_cinfo->pdev;
591 struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
592 u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
593 sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
594 u32 data = 0;
595 u16 count = erdr->count, rfd_avail;
596 u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
597
	cleaned_count = erdr->pending_fill;
	sw_next_to_clean = erdr->sw_next_to_clean;
600
601 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
602 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
603 EDMA_RFD_CONS_IDX_MASK;
604
605 do {
606 while (sw_next_to_clean != hw_next_to_clean) {
607 struct net_device *netdev;
608 struct edma_adapter *adapter;
609 struct edma_sw_desc *sw_desc;
610 struct sk_buff *skb;
611 struct edma_rx_return_desc *rd;
612 u8 *vaddr;
613 int port_id, i, drop_count = 0;
614 u32 priority;
615
616 if (!work_to_do)
617 break;
618
619 sw_desc = &erdr->sw_desc[sw_next_to_clean];
620 skb = sw_desc->skb;
621
622 /* Get RRD */
623 if (!edma_cinfo->page_mode) {
624 dma_unmap_single(&pdev->dev, sw_desc->dma,
625 sw_desc->length, DMA_FROM_DEVICE);
626 rd = (struct edma_rx_return_desc *)skb->data;
627
628 } else {
629 dma_unmap_page(&pdev->dev, sw_desc->dma,
630 sw_desc->length, DMA_FROM_DEVICE);
631 vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
632 memcpy((uint8_t *)&rrd[0], vaddr, 16);
633 rd = (struct edma_rx_return_desc *)rrd;
634 kunmap_atomic(vaddr);
635 }
636
637 /* Check if RRD is valid */
638 if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
639 dev_err(&pdev->dev, "Incorrect RRD DESC valid bit set");
640 edma_clean_rfd(pdev, erdr, sw_next_to_clean, 0);
641 sw_next_to_clean = (sw_next_to_clean + 1) &
642 (erdr->count - 1);
643 cleaned_count++;
644 continue;
645 }
646
647 /* Get the number of RFDs from RRD */
648 num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
649
650 /* Get Rx port ID from switch */
651 port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
652 if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
653 if (net_ratelimit()) {
654 dev_err(&pdev->dev, "Incorrect RRD source port bit set");
655 dev_err(&pdev->dev,
656 "RRD Dump\n rrd0:%x rrd1: %x rrd2: %x rrd3: %x rrd4: %x rrd5: %x rrd6: %x rrd7: %x",
657 rd->rrd0, rd->rrd1, rd->rrd2, rd->rrd3, rd->rrd4, rd->rrd5, rd->rrd6, rd->rrd7);
658 dev_err(&pdev->dev, "Num_rfds: %d, src_port: %d, pkt_size: %d, cvlan_tag: %d\n",
659 num_rfds, rd->rrd1 & EDMA_RRD_SRC_PORT_NUM_MASK,
660 rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK, rd->rrd7 & EDMA_RRD_CVLAN);
661 }
662 for (i = 0; i < num_rfds; i++) {
663 edma_clean_rfd(pdev, erdr, sw_next_to_clean, i);
664 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
665 }
666
667 cleaned_count += num_rfds;
668 continue;
669 }
670
671 netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
672 if (!netdev) {
673 dev_err(&pdev->dev, "Invalid netdev");
674 for (i = 0; i < num_rfds; i++) {
675 edma_clean_rfd(pdev, erdr, sw_next_to_clean, i);
676 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
677 }
678
679 cleaned_count += num_rfds;
680 continue;
681 }
682 adapter = netdev_priv(netdev);
683
			/* This code handles the case where a high
			 * priority stream and a low priority stream are
			 * received simultaneously on the DUT. The problem
			 * occurs if one of the Rx rings is full and the
			 * corresponding core is busy with other work. This
			 * causes the ESS CPU port to backpressure all
			 * incoming traffic, including the high priority
			 * stream. We monitor the free descriptor count on
			 * each CPU and, whenever it drops below a threshold
			 * (< 80), we drop all low priority traffic and let
			 * only high priority traffic pass through. This
			 * keeps the ESS CPU port from backpressuring the
			 * high priority stream.
			 */
697 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
698 & EDMA_RRD_PRIORITY_MASK;
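			/* Editorial note: rfd_avail below is the usual
			 * power-of-two ring occupancy formula; e.g. with
			 * count = 128, sw_next_to_clean = 10 and
			 * hw_next_to_clean = 20 it evaluates to
			 * (128 + 10 - 20 - 1) & 127 = 117 free descriptors.
			 * The power-of-two ring size is an assumption implied
			 * by the & (count - 1) masking used throughout.
			 */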
699 if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
700 rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
701 if (rfd_avail < EDMA_RFD_AVAIL_THR) {
702 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_REUSE;
703 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
704 adapter->stats.rx_dropped++;
705 cleaned_count++;
706 drop_count++;
707 if (drop_count == 3) {
708 work_to_do--;
709 (*work_done)++;
710 drop_count = 0;
711 }
712 if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
713 /* If buffer clean count reaches 16, we replenish HW buffers. */
714 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
715 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
716 sw_next_to_clean);
717 cleaned_count = ret_count;
						erdr->pending_fill = ret_count;
					}
720 continue;
721 }
722 }
723
724 work_to_do--;
725 (*work_done)++;
726
727 /* Increment SW index */
728 sw_next_to_clean = (sw_next_to_clean + 1) &
729 (erdr->count - 1);
730
731 /* Get the packet size and allocate buffer */
732 length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
733
734 if (edma_cinfo->page_mode) {
735 /* paged skb */
736 sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length,
737 sw_next_to_clean,
738 erdr, edma_cinfo);
739 if (!pskb_may_pull(skb, ETH_HLEN)) {
740 cleaned_count += num_rfds;
741 dev_kfree_skb_any(skb);
742 continue;
743 }
744 } else {
745 /* single or fraglist skb */
746
				/* The first 16 bytes of the packet hold the
				 * RRD descriptor, so the actual data starts
				 * at an offset of 16.
				 */
751 skb_reserve(skb, 16);
752 if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode))
753 skb_put(skb, length);
754 else
755 sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length,
756 sw_next_to_clean,
757 erdr, edma_cinfo);
758 }
759
760 cleaned_count += num_rfds;
761
762 if (edma_stp_rstp)
763 edma_rx_complete_stp_rstp(skb, port_id, rd);
764
765 skb->protocol = eth_type_trans(skb, netdev);
766
767 /* Record Rx queue for RFS/RPS and fill flow hash from HW */
768 skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
769 if (netdev->features & NETIF_F_RXHASH) {
770 hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
771 if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
772 skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
773 }
774
775#ifdef CONFIG_NF_FLOW_COOKIE
776 skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
777#endif
778 edma_receive_checksum(rd, skb);
779
780 /* Process VLAN HW acceleration indication provided by HW */
781 if (adapter->default_vlan_tag != rd->rrd4) {
782 vlan = rd->rrd4;
783 if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
784 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
785 else if (rd->rrd1 & EDMA_RRD_SVLAN)
786 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
787 }
788
789 /* Update rx statistics */
790 adapter->stats.rx_packets++;
791 adapter->stats.rx_bytes += length;
792
793 /* Check if we reached refill threshold */
794 if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
795 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
796 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
797 sw_next_to_clean);
798 cleaned_count = ret_count;
				erdr->pending_fill = ret_count;
			}
801
			/*
			 * We increment per-precedence counters for the rx packets
			 */
			if (edma_per_prec_stats_enable) {
				edma_cinfo->edma_ethstats.rx_prec[priority]++;
				edma_cinfo->edma_ethstats.rx_ac[edma_dscp2ac_tbl[priority]]++;

				if (edma_iad_stats_enable) {
					if (edma_dscp2ac_tbl[priority] == EDMA_AC_VI)
						edma_iad_process_flow(edma_cinfo, skb, EDMA_INGRESS_DIR, priority);
				}
			}
814
			/* At this point skb should go to stack */
816 napi_gro_receive(napi, skb);
817 }
818
819 /* Check if we still have NAPI budget */
820 if (!work_to_do)
821 break;
822
823 /* Read index once again since we still have NAPI budget */
824 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
825 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
826 EDMA_RFD_CONS_IDX_MASK;
827 } while (hw_next_to_clean != sw_next_to_clean);
828
829 erdr->sw_next_to_clean = sw_next_to_clean;
830
831 /* Refill here in case refill threshold wasn't reached */
832 if (likely(cleaned_count)) {
833 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
		erdr->pending_fill = ret_count;
		if (ret_count) {
			if (net_ratelimit())
				dev_dbg(&pdev->dev, "Edma not getting memory for descriptors.\n");
		}
839
		edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
			       erdr->sw_next_to_clean);
	}

	return erdr->pending_fill;
}
846
847/* edma_delete_rfs_filter()
848 * Remove RFS filter from switch
849 */
850static int edma_delete_rfs_filter(struct edma_adapter *adapter,
851 struct edma_rfs_filter_node *filter_node)
852{
853 int res = -1;
854
855 if (likely(adapter->set_rfs_rule))
856 res = (*adapter->set_rfs_rule)(adapter->netdev,
857#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
858 filter_node->keys.src,
859 filter_node->keys.dst, filter_node->keys.port16[0],
860 filter_node->keys.port16[1],
861 filter_node->keys.ip_proto,
862#else
863 filter_node->keys.addrs.v4addrs.src,
864 filter_node->keys.addrs.v4addrs.dst, filter_node->keys.ports.src,
865 filter_node->keys.ports.dst,
866 filter_node->keys.basic.ip_proto,
867#endif
868 filter_node->rq_id,
869 0);
870
871 return res;
872}
873
874/* edma_add_rfs_filter()
875 * Add RFS filter to switch
876 */
877static int edma_add_rfs_filter(struct edma_adapter *adapter,
878 struct flow_keys *keys, u16 rq,
879 struct edma_rfs_filter_node *filter_node)
880{
881 int res = -1;
882
883#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
884 filter_node->keys.src = keys->src;
885 filter_node->keys.dst = keys->dst;
886 filter_node->keys.ports = keys->ports;
887 filter_node->keys.ip_proto = keys->ip_proto;
888#else
889 filter_node->keys.addrs.v4addrs.src = keys->addrs.v4addrs.src;
890 filter_node->keys.addrs.v4addrs.dst = keys->addrs.v4addrs.dst;
891 filter_node->keys.ports.ports = keys->ports.ports;
892 filter_node->keys.basic.ip_proto = keys->basic.ip_proto;
893#endif
894
895 /* Call callback registered by ESS driver */
896 if (likely(adapter->set_rfs_rule))
897#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
898 res = (*adapter->set_rfs_rule)(adapter->netdev, keys->src,
899 keys->dst, keys->port16[0], keys->port16[1],
900 keys->ip_proto, rq, 1);
901#else
902 res = (*adapter->set_rfs_rule)(adapter->netdev, keys->addrs.v4addrs.src,
903 keys->addrs.v4addrs.dst, keys->ports.src, keys->ports.dst,
904 keys->basic.ip_proto, rq, 1);
905#endif
906
907 return res;
908}
909
910/* edma_rfs_key_search()
911 * Look for existing RFS entry
912 */
913static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
914 struct flow_keys *key)
915{
916 struct edma_rfs_filter_node *p;
917
918 hlist_for_each_entry(p, h, node)
919#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
920 if (p->keys.src == key->src &&
921 p->keys.dst == key->dst &&
922 p->keys.ports == key->ports &&
923 p->keys.ip_proto == key->ip_proto)
924#else
925 if (p->keys.addrs.v4addrs.src == key->addrs.v4addrs.src &&
926 p->keys.addrs.v4addrs.dst == key->addrs.v4addrs.dst &&
927 p->keys.ports.ports == key->ports.ports &&
928 p->keys.basic.ip_proto == key->basic.ip_proto)
929#endif
930 return p;
931 return NULL;
932}
933
934/* edma_initialise_rfs_flow_table()
935 * Initialise EDMA RFS flow table
936 */
937static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
938{
939 int i;
940
941 spin_lock_init(&adapter->rfs.rfs_ftab_lock);
942
943 /* Initialize EDMA flow hash table */
944 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
945 INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
946
947 adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
948 adapter->rfs.filter_available = adapter->rfs.max_num_filter;
949 adapter->rfs.hashtoclean = 0;
950
951 /* Add timer to get periodic RFS updates from OS */
952 init_timer(&adapter->rfs.expire_rfs);
953 adapter->rfs.expire_rfs.function = edma_flow_may_expire;
954 adapter->rfs.expire_rfs.data = (unsigned long)adapter;
955 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ/4);
956}
957
958/* edma_free_rfs_flow_table()
959 * Free EDMA RFS flow table
960 */
961static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
962{
963 int i;
964
965 /* Remove sync timer */
966 del_timer_sync(&adapter->rfs.expire_rfs);
967 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
968
969 /* Free EDMA RFS table entries */
970 adapter->rfs.filter_available = 0;
971
972 /* Clean-up EDMA flow hash table */
973 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
974 struct hlist_head *hhead;
975 struct hlist_node *tmp;
976 struct edma_rfs_filter_node *filter_node;
977 int res;
978
979 hhead = &adapter->rfs.hlist_head[i];
980 hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
981 res = edma_delete_rfs_filter(adapter, filter_node);
982 if (res < 0)
983 dev_warn(&adapter->netdev->dev,
984 "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
985 filter_node->flow_id);
986 hlist_del(&filter_node->node);
987 kfree(filter_node);
988 }
989 }
990 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
991}
992
993/* edma_tx_unmap_and_free()
994 * clean TX buffer
995 */
996static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
997 struct edma_sw_desc *sw_desc)
998{
999 struct sk_buff *skb = sw_desc->skb;
1000
1001 if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
1002 (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
1003 /* unmap_single for skb head area */
1004 dma_unmap_single(&pdev->dev, sw_desc->dma,
1005 sw_desc->length, DMA_TO_DEVICE);
1006 else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
1007 /* unmap page for paged fragments */
1008 dma_unmap_page(&pdev->dev, sw_desc->dma,
1009 sw_desc->length, DMA_TO_DEVICE);
1010
1011 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
1012 dev_kfree_skb_any(skb);
1013
1014 sw_desc->flags = 0;
1015}
1016
1017/* edma_tx_complete()
1018 * Used to clean tx queues and update hardware and consumer index
1019 */
1020static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
1021{
1022 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1023 struct edma_sw_desc *sw_desc;
1024 struct platform_device *pdev = edma_cinfo->pdev;
1025 int i;
1026
1027 u16 sw_next_to_clean = etdr->sw_next_to_clean;
1028 u16 hw_next_to_clean;
1029 u32 data = 0;
1030
1031 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
1032 hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
1033
1034 /* clean the buffer here */
1035 while (sw_next_to_clean != hw_next_to_clean) {
1036 sw_desc = &etdr->sw_desc[sw_next_to_clean];
1037 edma_tx_unmap_and_free(pdev, sw_desc);
1038 sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
1039 }
1040
1041 etdr->sw_next_to_clean = sw_next_to_clean;
1042
1043 /* update the TPD consumer index register */
1044 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
1045
1046 /* Wake the queue if queue is stopped and netdev link is up */
1047 for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) {
1048 if (netif_tx_queue_stopped(etdr->nq[i])) {
1049 if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
1050 netif_tx_wake_queue(etdr->nq[i]);
1051 }
1052 }
1053}
1054
1055/* edma_get_tx_buffer()
1056 * Get sw_desc corresponding to the TPD
1057 */
1058static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
1059 struct edma_tx_desc *tpd, int queue_id)
1060{
1061 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1062
1063 return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
1064}
1065
1066/* edma_get_next_tpd()
1067 * Return a TPD descriptor for transfer
1068 */
1069static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
1070 int queue_id)
1071{
1072 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1073 u16 sw_next_to_fill = etdr->sw_next_to_fill;
1074 struct edma_tx_desc *tpd_desc =
1075 (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
1076
1077 etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
1078
1079 return tpd_desc;
1080}
1081
1082/* edma_tpd_available()
1083 * Check number of free TPDs
1084 */
1085static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
1086 int queue_id)
1087{
1088 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1089
1090 u16 sw_next_to_fill;
1091 u16 sw_next_to_clean;
1092 u16 count = 0;
1093
1094 sw_next_to_clean = etdr->sw_next_to_clean;
1095 sw_next_to_fill = etdr->sw_next_to_fill;
1096
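	/* Worked example (illustrative only): for a 128-entry ring with
	 * sw_next_to_fill == sw_next_to_clean the formula below yields
	 * 128 + clean - fill - 1 = 127 free TPDs; one slot is always left
	 * empty so a full ring can be told apart from an empty one (an
	 * inference from the arithmetic, not a documented requirement).
	 */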
1097 if (likely(sw_next_to_clean <= sw_next_to_fill))
1098 count = etdr->count;
1099
1100 return count + sw_next_to_clean - sw_next_to_fill - 1;
1101}
1102
/* edma_tx_queue_get()
 *	Get the Tx queue number to be used for this skb
 */
1106static inline int edma_tx_queue_get(struct edma_adapter *adapter,
1107 struct sk_buff *skb, int txq_id)
1108{
	/* skb->priority is used as an index into the skb priority table,
	 * and based on the packet priority the corresponding queue is
	 * assigned.
	 */
1112 return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
1113}
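/* Example with hypothetical numbers: if tx_start_offset[txq_id] is 4 and
 * skb->priority is 5, edma_skb_priority_offset() returns 1 and the frame is
 * placed on hardware queue 5. The offset value 4 is purely illustrative.
 */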
1114
1115/* edma_tx_update_hw_idx()
1116 * update the producer index for the ring transmitted
1117 */
1118static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
1119 struct sk_buff *skb, int queue_id)
1120{
1121 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1122 u32 tpd_idx_data;
1123
1124 /* Read and update the producer index */
1125 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
1126 tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
1127 tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
1128 << EDMA_TPD_PROD_IDX_SHIFT;
1129
1130 edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
1131}
1132
/* edma_rollback_tx()
 *	Function to reclaim tx resources in case of error
 */
1136static void edma_rollback_tx(struct edma_adapter *adapter,
1137 struct edma_tx_desc *start_tpd, int queue_id)
1138{
1139 struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
1140 struct edma_sw_desc *sw_desc;
1141 struct edma_tx_desc *tpd = NULL;
1142 u16 start_index, index;
1143
1144 start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
1145
1146 index = start_index;
1147 while (index != etdr->sw_next_to_fill) {
1148 tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
1149 sw_desc = &etdr->sw_desc[index];
1150 edma_tx_unmap_and_free(adapter->pdev, sw_desc);
1151 memset(tpd, 0, sizeof(struct edma_tx_desc));
1152 if (++index == etdr->count)
1153 index = 0;
1154 }
1155 etdr->sw_next_to_fill = start_index;
1156}
1157
/* edma_get_v4_precedence()
 *	Function to retrieve precedence for IPv4
 */
1161static inline int edma_get_v4_precedence(struct sk_buff *skb, int nh_offset, u8 *precedence)
1162{
1163 const struct iphdr *iph;
1164 struct iphdr iph_hdr;
1165
1166 iph = skb_header_pointer(skb, nh_offset, sizeof(iph_hdr), &iph_hdr);
1167
1168 if (!iph || iph->ihl < 5)
1169 return -1;
1170
1171 *precedence = iph->tos >> EDMA_DSCP_PREC_SHIFT;
1172
1173 return 0;
1174}
1175
1176/* edma_get_v6_precedence()
1177 * Function to retrieve precedence for IPv6
1178 */
1179static inline int edma_get_v6_precedence(struct sk_buff *skb, int nh_offset, u8 *precedence)
1180{
1181 const struct ipv6hdr *iph;
1182 struct ipv6hdr iph_hdr;
1183
1184 iph = skb_header_pointer(skb, nh_offset, sizeof(iph_hdr), &iph_hdr);
1185
1186 if (!iph)
1187 return -1;
1188
1189 *precedence = iph->priority >> EDMA_DSCP6_PREC_SHIFT;
1190
1191 return 0;
1192}
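/* Editorial note: both helpers above extract the 3-bit IP precedence (the
 * top bits of the IPv4 TOS byte and of the IPv6 Traffic Class respectively)
 * for the per-precedence statistics; the exact shift constants are defined
 * in the EDMA headers and are not repeated here.
 */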
1193
1194/* edma_get_skb_precedence()
1195 * Function to retrieve precedence from skb
1196 */
1197static int edma_get_skb_precedence(struct sk_buff *skb, u8 *precedence)
1198{
1199 int nhoff = skb_network_offset(skb);
1200 __be16 proto = skb->protocol;
1201 int ret;
1202 struct pppoeh_proto *pppoeh, ppp_hdr;
1203
	switch (proto) {
1205 case __constant_htons(ETH_P_IP): {
1206 ret = edma_get_v4_precedence(skb, nhoff, precedence);
1207 if (ret)
1208 return -1;
1209 break;
1210 }
1211 case __constant_htons(ETH_P_IPV6): {
1212 ret = edma_get_v6_precedence(skb, nhoff, precedence);
1213 if (ret)
1214 return -1;
1215 break;
1216 }
1217 case __constant_htons(ETH_P_PPP_SES): {
1218 pppoeh = skb_header_pointer(skb, nhoff, sizeof(ppp_hdr), &ppp_hdr);
1219 if (!pppoeh)
1220 return -1;
1221
1222 proto = pppoeh->proto;
1223 nhoff += PPPOE_SES_HLEN;
1224 switch (proto) {
1225 case __constant_htons(PPP_IP): {
1226 ret = edma_get_v4_precedence(skb, nhoff, precedence);
1227 if (ret)
1228 return -1;
1229 break;
1230 }
1231 case __constant_htons(PPP_IPV6): {
1232 ret = edma_get_v6_precedence(skb, nhoff, precedence);
1233 if (ret)
1234 return -1;
1235 break;
1236 }
1237 default:
1238 return -1;
1239 }
1240 break;
1241 }
1242 default:
1243 return -1;
1244 }
1245
1246 return 0;
1247}
1248
/* edma_tx_map_and_fill()
 *	gets called from edma_xmit()
 *
 *	This is where the DMA of the buffer to be transmitted
 *	gets mapped
 */
1255static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
1256 struct edma_adapter *adapter,
1257 struct sk_buff *skb, int queue_id,
1258 unsigned int flags_transmit,
1259 u16 from_cpu, u16 dp_bitmap,
1260 bool packet_is_rstp, int nr_frags)
1261{
1262 struct edma_sw_desc *sw_desc = NULL;
1263 struct platform_device *pdev = edma_cinfo->pdev;
1264 struct edma_tx_desc *tpd = NULL;
1265 struct edma_tx_desc *start_tpd = NULL;
1266 struct sk_buff *iter_skb;
1267 int i;
1268 u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
1269 u16 buf_len, lso_desc_len = 0;
1270
1271 if (skb_is_gso(skb)) {
1272 /* TODO: What additional checks need to be performed here */
1273 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
1274 lso_word1 |= EDMA_TPD_IPV4_EN;
1275 ip_hdr(skb)->check = 0;
1276 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1277 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1278 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
1279 lso_word1 |= EDMA_TPD_LSO_V2_EN;
1280 ipv6_hdr(skb)->payload_len = 0;
1281 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1282 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1283 } else
1284 return -EINVAL;
1285
1286 lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
1287 (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
1288 } else if (flags_transmit & EDMA_HW_CHECKSUM) {
1289 u8 css, cso;
1290 cso = skb_checksum_start_offset(skb);
1291 css = cso + skb->csum_offset;
1292
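		/* Editorial note: cso is the offset at which checksumming
		 * starts and css the offset at which the computed checksum
		 * is stored. The >> 1 below suggests the hardware expects
		 * both offsets in 16-bit words; that interpretation is
		 * inferred from the shift, not taken from a datasheet.
		 */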
1293 word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
1294 word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
1295 word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
1296 }
1297
1298 if (skb->protocol == htons(ETH_P_PPP_SES))
1299 word1 |= EDMA_TPD_PPPOE_EN;
1300
1301 if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
1302 switch (skb->vlan_proto) {
1303 case htons(ETH_P_8021Q):
1304 word3 |= (1 << EDMA_TX_INS_CVLAN);
1305#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1306 word3 |= vlan_tx_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1307#else
1308 word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1309#endif
1310 break;
1311 case htons(ETH_P_8021AD):
1312 word1 |= (1 << EDMA_TX_INS_SVLAN);
1313#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1314 svlan_tag = vlan_tx_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1315#else
1316 svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1317#endif
1318 break;
1319 default:
1320 dev_err(&pdev->dev, "no ctag or stag present\n");
1321 goto vlan_tag_error;
1322 }
1323 } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
1324 word3 |= (1 << EDMA_TX_INS_CVLAN);
1325 word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
1326 }
1327
1328 if (packet_is_rstp) {
1329 word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1330 word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
1331 } else {
1332 word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1333 }
1334
1335 buf_len = skb_headlen(skb);
1336
1337 if (lso_word1) {
1338 if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
1339
1340 /* IPv6 LSOv2 descriptor */
1341 start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1342 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1343 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
1344
1345 /* LSOv2 descriptor overrides addr field to pass length */
1346 tpd->addr = cpu_to_le16(skb->len);
1347 tpd->svlan_tag = svlan_tag;
1348 tpd->word1 = word1 | lso_word1;
1349 tpd->word3 = word3;
1350 }
1351
1352 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1353 if (!start_tpd)
1354 start_tpd = tpd;
1355 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1356
		/* The last buffer info contains the skb address,
		 * so the skb will be freed after unmap
		 */
1360 sw_desc->length = lso_desc_len;
1361 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1362
1363 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1364 skb->data, buf_len, DMA_TO_DEVICE);
1365 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1366 goto dma_error;
1367
1368 tpd->addr = cpu_to_le32(sw_desc->dma);
1369 tpd->len = cpu_to_le16(buf_len);
1370
1371 tpd->svlan_tag = svlan_tag;
1372 tpd->word1 = word1 | lso_word1;
1373 tpd->word3 = word3;
1374
		/* The last buffer info contains the skb address,
		 * so it will be freed after unmap
		 */
1378 sw_desc->length = lso_desc_len;
1379 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1380
1381 buf_len = 0;
1382 }
1383
1384 if (likely(buf_len)) {
1385
1386 /* TODO Do not dequeue descriptor if there is a potential error */
1387 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1388
1389 if (!start_tpd)
1390 start_tpd = tpd;
1391
1392 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1393
		/* The last buffer info contains the skb address,
		 * so it will be freed after unmap
		 */
1397 sw_desc->length = buf_len;
1398 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1399 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1400 skb->data, buf_len, DMA_TO_DEVICE);
1401 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1402 goto dma_error;
1403
1404 tpd->addr = cpu_to_le32(sw_desc->dma);
1405 tpd->len = cpu_to_le16(buf_len);
1406
1407 tpd->svlan_tag = svlan_tag;
1408 tpd->word1 = word1 | lso_word1;
1409 tpd->word3 = word3;
1410 }
1411
1412 i = 0;
1413
1414 /* Walk through paged frags for head skb */
1415 while (nr_frags--) {
1416 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1417 buf_len = skb_frag_size(frag);
1418 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1419 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1420 sw_desc->length = buf_len;
1421 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1422
1423 sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
1424
1425 if (dma_mapping_error(NULL, sw_desc->dma))
1426 goto dma_error;
1427
1428 tpd->addr = cpu_to_le32(sw_desc->dma);
1429 tpd->len = cpu_to_le16(buf_len);
1430
1431 tpd->svlan_tag = svlan_tag;
1432 tpd->word1 = word1 | lso_word1;
1433 tpd->word3 = word3;
1434 i++;
1435 }
1436
1437 /* Walk through all fraglist skbs */
1438 skb_walk_frags(skb, iter_skb) {
1439 buf_len = iter_skb->len;
1440 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1441 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1442 sw_desc->length = buf_len;
1443 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1444 iter_skb->data, buf_len, DMA_TO_DEVICE);
1445
1446 if (dma_mapping_error(NULL, sw_desc->dma))
1447 goto dma_error;
1448
1449 tpd->addr = cpu_to_le32(sw_desc->dma);
1450 tpd->len = cpu_to_le16(buf_len);
1451 tpd->svlan_tag = svlan_tag;
1452 tpd->word1 = word1 | lso_word1;
1453 tpd->word3 = word3;
1454 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
1455
1456 i = 0;
1457
1458 nr_frags = skb_shinfo(iter_skb)->nr_frags;
1459
1460 /* Walk through paged frags for this fraglist skb */
1461 while (nr_frags--) {
1462 skb_frag_t *frag = &skb_shinfo(iter_skb)->frags[i];
1463 buf_len = skb_frag_size(frag);
1464 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1465 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1466 sw_desc->length = buf_len;
1467 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1468
1469 sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag,
1470 0, buf_len, DMA_TO_DEVICE);
1471 if (dma_mapping_error(NULL, sw_desc->dma))
1472 goto dma_error;
1473
1474 tpd->addr = cpu_to_le32(sw_desc->dma);
1475 tpd->len = cpu_to_le16(buf_len);
1476 tpd->svlan_tag = svlan_tag;
1477 tpd->word1 = word1 | lso_word1;
1478 tpd->word3 = word3;
1479 i++;
1480 }
1481 }
1482
	/* If sysctl support for per-precedence stats is enabled */
	if (edma_per_prec_stats_enable) {
		uint8_t precedence = 0;

		if (!edma_get_skb_precedence(skb, &precedence)) {
			/* Increment per-precedence counters for tx packets
			 * and set the precedence in the TPD.
			 */
			edma_cinfo->edma_ethstats.tx_prec[precedence]++;
			edma_cinfo->edma_ethstats.tx_ac[edma_dscp2ac_tbl[precedence]]++;
			tpd->word3 |= precedence << EDMA_TPD_PRIO_SHIFT;
		}

		/* If sysctl support for IAD stats is enabled */
		if (edma_iad_stats_enable) {
			if (edma_dscp2ac_tbl[precedence] == EDMA_AC_VI)
				edma_iad_process_flow(edma_cinfo, skb, EDMA_EGRESS_DIR, precedence);
		}
	}
1502
	/* If tpd or sw_desc is still uninitialized then we need to return */
	if ((!tpd) || (!sw_desc))
		return -EINVAL;
1506
1507 tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
1508
1509 sw_desc->skb = skb;
1510 sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
1511
1512 return 0;
1513
1514dma_error:
1515 edma_rollback_tx(adapter, start_tpd, queue_id);
1516 dev_err(&pdev->dev, "TX DMA map failed\n");
1517vlan_tag_error:
1518 return -ENOMEM;
1519}
1520
1521/* edma_check_link()
1522 * check Link status
1523 */
1524static int edma_check_link(struct edma_adapter *adapter)
1525{
1526 struct phy_device *phydev = adapter->phydev;
1527
1528 if (!(adapter->poll_required))
1529 return __EDMA_LINKUP;
1530
1531 if (phydev->link)
1532 return __EDMA_LINKUP;
1533
1534 return __EDMA_LINKDOWN;
1535}
1536
1537/* edma_adjust_link()
1538 * check for edma link status
1539 */
1540void edma_adjust_link(struct net_device *netdev)
1541{
1542 int status;
1543 struct edma_adapter *adapter = netdev_priv(netdev);
1544 struct phy_device *phydev = adapter->phydev;
1545
1546 if (!test_bit(__EDMA_UP, &adapter->state_flags))
1547 return;
1548
1549 status = edma_check_link(adapter);
1550
1551 if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
1552 dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
1553 adapter->link_state = __EDMA_LINKUP;
1554 netif_carrier_on(netdev);
1555 if (netif_running(netdev))
1556 netif_tx_wake_all_queues(netdev);
1557 } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
1558 dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
1559 adapter->link_state = __EDMA_LINKDOWN;
1560 netif_carrier_off(netdev);
1561 netif_tx_stop_all_queues(netdev);
1562 }
1563}
1564
/* edma_get_stats64()
 *	Statistics API used to retrieve the tx/rx statistics
 */
struct rtnl_link_stats64 *edma_get_stats64(struct net_device *netdev,
					   struct rtnl_link_stats64 *stats)
{
	struct edma_adapter *adapter = netdev_priv(netdev);

	memcpy(stats, &adapter->stats, sizeof(*stats));

	return stats;
}
1577
1578/* edma_xmit()
1579 * Main api to be called by the core for packet transmission
1580 */
1581netdev_tx_t edma_xmit(struct sk_buff *skb,
1582 struct net_device *net_dev)
1583{
1584 struct edma_adapter *adapter = netdev_priv(net_dev);
1585 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1586 struct edma_tx_desc_ring *etdr;
1587 u16 from_cpu = 0, dp_bitmap = 0, txq_id;
1588 int ret, nr_frags_first = 0, num_tpds_needed = 1, queue_id = 0;
1589 unsigned int flags_transmit = 0;
1590 bool packet_is_rstp = false;
1591 struct netdev_queue *nq = NULL;
1592
1593 if (skb_shinfo(skb)->nr_frags) {
1594 nr_frags_first = skb_shinfo(skb)->nr_frags;
1595
		/* The check below is unlikely to hit; BUG_ON if it does */
		BUG_ON(nr_frags_first > MAX_SKB_FRAGS);
1598
1599 num_tpds_needed += nr_frags_first;
1600 }
1601
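	/* Illustration (hypothetical skb): a linear skb needs a single TPD,
	 * an skb with three paged frags needs four, and each fraglist member
	 * below adds one TPD for its head plus one per frag.
	 */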
1602 if (skb_has_frag_list(skb)) {
1603 struct sk_buff *iter_skb;
1604
1605 /* Walk through fraglist skbs making a note of nr_frags */
1606 skb_walk_frags(skb, iter_skb) {
1607 unsigned char nr_frags = skb_shinfo(iter_skb)->nr_frags;
1608
			/* The check below is unlikely to hit; BUG_ON if it does */
			BUG_ON(nr_frags > MAX_SKB_FRAGS);
1611
1612 /* One TPD for skb->data and more for nr_frags */
1613 num_tpds_needed += (1 + nr_frags);
1614 }
1615 }
1616
1617 if (edma_stp_rstp) {
1618 u16 ath_hdr, ath_eth_type;
1619 u8 mac_addr[EDMA_ETH_HDR_LEN];
1620 ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
1621 if (ath_eth_type == edma_ath_eth_type) {
1622 packet_is_rstp = true;
1623 ath_hdr = htons(*(uint16_t *)&skb->data[14]);
1624 dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
1625 from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
1626 memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
1627
1628 skb_pull(skb, 4);
1629
1630 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
1631 }
1632 }
1633
1634 /* this will be one of the 4 TX queues exposed to linux kernel */
1635 txq_id = skb_get_queue_mapping(skb);
1636 queue_id = edma_tx_queue_get(adapter, skb, txq_id);
1637 etdr = edma_cinfo->tpd_ring[queue_id];
1638 nq = netdev_get_tx_queue(net_dev, txq_id);
1639
1640 local_bh_disable();
1641 /* Tx is not handled in bottom half context. Hence, we need to protect
1642 * Tx from tasks and bottom half
1643 */
1644
1645 if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
1646 /* not enough descriptor, just stop queue */
1647 netif_tx_stop_queue(nq);
1648 local_bh_enable();
1649 dev_dbg(&net_dev->dev, "Not enough descriptors available");
1650 edma_cinfo->edma_ethstats.tx_desc_error++;
1651 return NETDEV_TX_BUSY;
1652 }
1653
1654 /* Check and mark VLAN tag offload */
1655#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1656 if (vlan_tx_tag_present(skb))
1657#else
1658 if (skb_vlan_tag_present(skb))
1659#endif
1660 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
1661 else if (adapter->default_vlan_tag)
1662 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
1663
1664 /* Check and mark checksum offload */
1665 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
1666 flags_transmit |= EDMA_HW_CHECKSUM;
1667
1668 /* Map and fill descriptor for Tx */
1669 ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
1670 flags_transmit, from_cpu, dp_bitmap,
1671 packet_is_rstp, nr_frags_first);
1672 if (ret) {
1673 dev_kfree_skb_any(skb);
1674 adapter->stats.tx_errors++;
1675 goto netdev_okay;
1676 }
1677
1678 /* Update SW producer index */
1679 edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
1680
1681 /* update tx statistics */
1682 adapter->stats.tx_packets++;
1683 adapter->stats.tx_bytes += skb->len;
1684
1685netdev_okay:
1686 local_bh_enable();
1687 return NETDEV_TX_OK;
1688}
1689
1690/*
1691 * edma_flow_may_expire()
1692 * Timer function called periodically to delete the node
1693 */
1694void edma_flow_may_expire(unsigned long data)
1695{
1696 struct edma_adapter *adapter = (struct edma_adapter *)data;
1697 int j;
1698
1699 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1700 for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
1701 struct hlist_head *hhead;
1702 struct hlist_node *tmp;
1703 struct edma_rfs_filter_node *n;
1704 bool res;
1705
1706 hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
1707 hlist_for_each_entry_safe(n, tmp, hhead, node) {
1708 res = rps_may_expire_flow(adapter->netdev, n->rq_id,
1709 n->flow_id, n->filter_id);
1710 if (res) {
1711 res = edma_delete_rfs_filter(adapter, n);
1712 if (res < 0)
1713 dev_dbg(&adapter->netdev->dev,
1714 "RFS entry %d not allowed to be flushed by Switch",
1715 n->flow_id);
1716 else {
1717 hlist_del(&n->node);
1718 kfree(n);
1719 adapter->rfs.filter_available++;
1720 }
1721 }
1722 }
1723 }
1724
1725 adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
1726 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1727 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ/4);
1728}
1729
/* edma_rx_flow_steer()
 *	Called by core to steer the flow to CPU
 */
1733int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1734 u16 rxq, u32 flow_id)
1735{
1736 struct flow_keys keys;
1737 struct edma_rfs_filter_node *filter_node;
1738 struct edma_adapter *adapter = netdev_priv(dev);
1739 u16 hash_tblid;
1740 int res;
1741
1742 if (skb->protocol == htons(ETH_P_IPV6)) {
1743 res = -EPROTONOSUPPORT;
1744 goto no_protocol_err;
1745 }
1746
1747 /* Dissect flow parameters
1748 * We only support IPv4 + TCP/UDP
1749 */
1750#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1751 res = skb_flow_dissect(skb, &keys);
1752 if (!((keys.ip_proto == IPPROTO_TCP) || (keys.ip_proto == IPPROTO_UDP))) {
1753#else
1754 res = skb_flow_dissect_flow_keys(skb, &keys, 0);
1755 if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
1756#endif
1757 res = -EPROTONOSUPPORT;
1758 goto no_protocol_err;
1759 }
1760
1761 /* Check if table entry exists */
1762 hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
1763
1764 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1765 filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
1766
1767 if (filter_node) {
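 /* A filter already exists for this flow. If it already points at the
  * requested rx queue there is nothing to do; otherwise the old switch
  * rule is deleted and a new one is installed for rxq.
  */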
1768 if (rxq == filter_node->rq_id) {
1769 res = -EEXIST;
1770 goto out;
1771 } else {
1772 res = edma_delete_rfs_filter(adapter, filter_node);
1773 if (res < 0)
1774 dev_warn(&adapter->netdev->dev,
1775 "Cannot steer flow %d to different queue",
1776 filter_node->flow_id);
1777 else {
1778 adapter->rfs.filter_available++;
1779 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1780 if (res < 0) {
1781 dev_warn(&adapter->netdev->dev,
1782 "Cannot steer flow %d to different queue",
1783 filter_node->flow_id);
1784 } else {
1785 adapter->rfs.filter_available--;
1786 filter_node->rq_id = rxq;
1787 filter_node->filter_id = res;
1788 }
1789 }
1790 }
1791 } else {
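 /* No filter exists for this flow yet: allocate a node, program a
  * switch rule for rxq and add the node to the hash bucket.
  */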
1792 if (adapter->rfs.filter_available == 0) {
1793 res = -EBUSY;
1794 goto out;
1795 }
1796
1797 filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
1798 if (!filter_node) {
1799 res = -ENOMEM;
1800 goto out;
1801 }
1802
1803 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1804 if (res < 0) {
1805 kfree(filter_node);
1806 goto out;
1807 }
1808
1809 adapter->rfs.filter_available--;
1810 filter_node->rq_id = rxq;
1811 filter_node->filter_id = res;
1812 filter_node->flow_id = flow_id;
1813 filter_node->keys = keys;
1814 INIT_HLIST_NODE(&filter_node->node);
1815 hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
1816 }
1817
1818out:
1819 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1820no_protocol_err:
1821 return res;
1822}
1823
1824#ifdef CONFIG_RFS_ACCEL
1825/* edma_register_rfs_filter()
1826 * Add RFS filter callback
1827 */
1828int edma_register_rfs_filter(struct net_device *netdev,
1829 set_rfs_filter_callback_t set_filter)
1830{
1831 struct edma_adapter *adapter = netdev_priv(netdev);
1832
1833 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1834
1835 if (adapter->set_rfs_rule) {
1836 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1837 return -1;
1838 }
1839
1840 adapter->set_rfs_rule = set_filter;
1841 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1842
1843 return 0;
1844}
1845#endif
1846
1847/* edma_select_xps_queue()
1848 * Called by the Linux TX stack to select the TX queue for an skb
1849 */
1850u16 edma_select_xps_queue(struct net_device *dev, struct sk_buff *skb,
1851 void *accel_priv, select_queue_fallback_t fallback)
1852{
1853#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1854 return smp_processor_id();
1855#else
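 /* get_cpu() disables preemption while the CPU id is read and put_cpu()
  * re-enables it, so the chosen tx queue matches the CPU this transmit
  * is running on.
  */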
1856 int cpu = get_cpu();
1857 put_cpu();
1858
1859 return cpu;
1860#endif
1861}
1862
1863/* edma_alloc_tx_rings()
1864 * Allocate tx rings
1865 */
1866int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
1867{
1868 struct platform_device *pdev = edma_cinfo->pdev;
1869 int i, err = 0;
1870
1871 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1872 err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1873 if (err) {
1874 dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
1875 return err;
1876 }
1877 }
1878
1879 return 0;
1880}
1881
1882/* edma_free_tx_rings()
1883 * Free tx rings
1884 */
1885void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
1886{
1887 int i;
1888
1889 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1890 edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1891}
1892
1893/* edma_free_tx_resources()
1894 * Free buffers associated with tx rings
1895 */
1896void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
1897{
1898 struct edma_tx_desc_ring *etdr;
1899 struct edma_sw_desc *sw_desc;
1900 struct platform_device *pdev = edma_cinfo->pdev;
1901 int i, j;
1902
1903 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1904 etdr = edma_cinfo->tpd_ring[i];
1905 for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
1906 sw_desc = &etdr->sw_desc[j];
1907 if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
1908 EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
1909 edma_tx_unmap_and_free(pdev, sw_desc);
1910 }
1911 }
1912}
1913
1914/* edma_alloc_rx_rings()
1915 * Allocate rx rings
1916 */
1917int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
1918{
1919 struct platform_device *pdev = edma_cinfo->pdev;
1920 int i, j, err = 0;
1921
1922 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1923 err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1924 if (err) {
1925 dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
1926 return err;
1927 }
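 /* With 4 rx queues the ring index advances by 2, so the in-use rfd
  * rings occupy alternate slots; with 8 queues every slot is used.
  */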
1928 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1929 }
1930
1931 return 0;
1932}
1933
1934/* edma_free_rx_rings()
1935 * free rx rings
1936 */
1937void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
1938{
1939 int i, j;
1940
1941 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1942 edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1943 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1944 }
1945}
1946
1947/* edma_free_queues()
1948 * Free the allocated queues
1949 */
1950void edma_free_queues(struct edma_common_info *edma_cinfo)
1951{
1952 int i, j;
1953
1954 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1955 if (edma_cinfo->tpd_ring[i])
1956 kfree(edma_cinfo->tpd_ring[i]);
1957 edma_cinfo->tpd_ring[i] = NULL;
1958 }
1959
1960 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1961 if (edma_cinfo->rfd_ring[j])
1962 kfree(edma_cinfo->rfd_ring[j]);
1963 edma_cinfo->rfd_ring[j] = NULL;
1964 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1965 }
1966
1967 edma_cinfo->num_rx_queues = 0;
1968 edma_cinfo->num_tx_queues = 0;
1969
1970 return;
1971}
1972
1973/* edma_free_rx_resources()
1974 * Free buffers associated with rx rings
1975 */
1976void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
1977{
1978 struct edma_rfd_desc_ring *erdr;
1979 struct platform_device *pdev = edma_cinfo->pdev;
1980 int i, j, k;
1981
1982 for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
1983 erdr = edma_cinfo->rfd_ring[k];
1984 for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
1985 /* unmap all descriptors while cleaning */
1986 edma_clean_rfd(pdev, erdr, j, 1);
1987 }
1988 k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1989
1990 }
1991}
1992
1993/* edma_alloc_queues_tx()
1994 * Allocate memory for all rings
1995 */
1996int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
1997{
1998 int i;
1999
2000 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
2001 struct edma_tx_desc_ring *etdr;
2002 etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
2003 if (!etdr)
2004 goto err;
2005 etdr->count = edma_cinfo->tx_ring_count;
2006 edma_cinfo->tpd_ring[i] = etdr;
2007 }
2008
2009 return 0;
2010err:
2011 edma_free_queues(edma_cinfo);
2012 return -1;
2013}
2014
2015/* edma_alloc_queues_rx()
2016 * Allocate memory for all rings
2017 */
2018int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
2019{
2020 int i, j;
2021
2022 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
2023 struct edma_rfd_desc_ring *rfd_ring;
2024 rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
2025 GFP_KERNEL);
2026 if (!rfd_ring)
2027 goto err;
2028 rfd_ring->count = edma_cinfo->rx_ring_count;
2029 edma_cinfo->rfd_ring[j] = rfd_ring;
2030 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2031 }
2032 return 0;
2033err:
2034 edma_free_queues(edma_cinfo);
2035 return -1;
2036}
2037
2038/* edma_clear_irq_status()
2039 * Clear interrupt status
2040 */
2041void edma_clear_irq_status(void)
2042{
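 /* Acknowledge/clear any pending rx, tx, misc and WOL interrupt
  * status bits.
  */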
2043 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
2044 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
2045 edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
2046 edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
2047}
2048
2049/* edma_configure()
2050 * Configure EDMA interrupts and control registers, and pre-allocate the rx buffers (skbs).
2051 */
2052int edma_configure(struct edma_common_info *edma_cinfo)
2053{
2054 struct edma_hw *hw = &edma_cinfo->hw;
2055 u32 intr_modrt_data;
2056 u32 intr_ctrl_data = 0;
2057 int i, j, ret_count;
2058
2059 edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
2060 intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
2061 intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
2062 edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
2063
2064 edma_clear_irq_status();
2065
2066 /* Clear any WOL status */
2067 edma_write_reg(EDMA_REG_WOL_CTRL, 0);
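 /* Program the default tx/rx interrupt moderation timers before the
  * rings are configured.
  */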
2068 intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
2069 intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
2070 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
2071 edma_configure_tx(edma_cinfo);
2072 edma_configure_rx(edma_cinfo);
2073
2074 /* Allocate the RX buffer */
2075 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
2076 struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
2077 ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
2078 if (ret_count)
2079 dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
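 /* Record how many rx descriptors could not be refilled; edma_poll
  * keeps the NAPI context polling while a queue still has a pending fill.
  */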
Rakesh Nair03b586c2017-04-03 18:28:58 +05302080 ring->pending_fill = ret_count;
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302081 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2082 }
2083
2084 /* Configure descriptor Ring */
2085 edma_init_desc(edma_cinfo);
2086 return 0;
2087}
2088
2089/* edma_irq_enable()
2090 * Enable default interrupt generation settings
2091 */
2092void edma_irq_enable(struct edma_common_info *edma_cinfo)
2093{
2094 struct edma_hw *hw = &edma_cinfo->hw;
2095 int i, j;
2096
2097 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
2098 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
2099 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
2100 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
2101 }
2102 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
2103 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
2104 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
2105}
2106
2107/* edma_irq_disable()
2108 * Disable Interrupt
2109 */
2110void edma_irq_disable(struct edma_common_info *edma_cinfo)
2111{
2112 int i;
2113
2114 for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
2115 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);
2116
2117 for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
2118 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
2119 edma_write_reg(EDMA_REG_MISC_IMR, 0);
2120 edma_write_reg(EDMA_REG_WOL_IMR, 0);
2121}
2122
2123/* edma_free_irqs()
2124 * Free All IRQs
2125 */
2126void edma_free_irqs(struct edma_adapter *adapter)
2127{
2128 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
2129 int i, j;
2130 int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);
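 /* Each core owns four tx IRQs starting at its tx_start, and either one
  * (4 rx queues) or two (8 rx queues) rx IRQs starting at its rx_start.
  */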
2131
2132 for (i = 0; i < CONFIG_NR_CPUS; i++) {
2133 for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
2134 free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);
2135
2136 for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
2137 free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
2138 }
2139}
2140
2141/* edma_enable_rx_ctrl()
2142 * Enable RX queue control
2143 */
2144void edma_enable_rx_ctrl(struct edma_hw *hw)
2145{
2146 u32 data;
2147
2148 edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
2149 data |= EDMA_RXQ_CTRL_EN;
2150 edma_write_reg(EDMA_REG_RXQ_CTRL, data);
2151}
2152
2153
2154/* edma_enable_tx_ctrl()
2155 * Enable TX queue control
2156 */
2157void edma_enable_tx_ctrl(struct edma_hw *hw)
2158{
2159 u32 data;
2160
2161 edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
2162 data |= EDMA_TXQ_CTRL_TXQ_EN;
2163 edma_write_reg(EDMA_REG_TXQ_CTRL, data);
2164}
2165
2166/* edma_stop_rx_tx()
2167 * Disable RX/TX queue control
2168 */
2169void edma_stop_rx_tx(struct edma_hw *hw)
2170{
2171 u32 data;
2172
2173 edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
2174 data &= ~EDMA_RXQ_CTRL_EN;
2175 edma_write_reg(EDMA_REG_RXQ_CTRL, data);
2176 edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
2177 data &= ~EDMA_TXQ_CTRL_TXQ_EN;
2178 edma_write_reg(EDMA_REG_TXQ_CTRL, data);
2179}
2180
2181/* edma_reset()
2182 * Reset the EDMA
2183 */
2184int edma_reset(struct edma_common_info *edma_cinfo)
2185{
2186 struct edma_hw *hw = &edma_cinfo->hw;
2187
2188 edma_irq_disable(edma_cinfo);
2189
2190 edma_clear_irq_status();
2191
2192 edma_stop_rx_tx(hw);
2193
2194 return 0;
2195}
2196
2197/* edma_fill_netdev()
2198 * Fill netdev for each etdr
2199 */
2200int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
2201 int dev, int txq_id)
2202{
2203 struct edma_tx_desc_ring *etdr;
2204 int i = 0;
2205
2206 etdr = edma_cinfo->tpd_ring[queue_id];
2207
2208 while (i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->netdev[i])
2209 i++;
2210
2211 if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
2212 return -1;
2213
2214 /* Populate the netdev associated with the tpd ring */
2215 etdr->netdev[i] = edma_netdev[dev];
2216 etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);
2217
2218 return 0;
2219}
2220
2221/* edma_change_mtu()
2222 * change the MTU of the NIC.
2223 */
2224int edma_change_mtu(struct net_device *netdev, int new_mtu)
2225{
2226 struct edma_adapter *adapter = netdev_priv(netdev);
2227 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
2228 int old_mtu = netdev->mtu;
2229 int max_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + (2 * VLAN_HLEN);
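 /* Account for the Ethernet header, FCS and up to two VLAN tags when
  * validating the requested MTU against the hardware frame limit.
  */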
2230
2231 if ((max_frame_size < ETH_ZLEN + ETH_FCS_LEN) ||
2232 (max_frame_size > EDMA_MAX_JUMBO_FRAME_SIZE)) {
2233 dev_err(&edma_cinfo->pdev->dev, "MTU setting not correct\n");
2234 return -EINVAL;
2235 }
2236
2237 /* set MTU */
2238 if (old_mtu != new_mtu) {
2239 netdev->mtu = new_mtu;
2240 netdev_update_features(netdev);
2241 }
2242
2243 return 0;
2244}
2245
2246/* edma_set_mac_addr()
2247 * Change the Ethernet Address of the NIC
2248 */
2249int edma_set_mac_addr(struct net_device *netdev, void *p)
2250{
2251 struct sockaddr *addr = p;
2252
2253 if (!is_valid_ether_addr(addr->sa_data))
2254 return -EINVAL;
2255
2256 if (netif_running(netdev))
2257 return -EBUSY;
2258
2259 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2260 return 0;
2261}
2262
2263/* edma_set_stp_rstp()
2264 * set stp/rstp
2265 */
2266void edma_set_stp_rstp(bool rstp)
2267{
2268 edma_stp_rstp = rstp;
2269}
2270
2271/* edma_assign_ath_hdr_type()
2272 * assign atheros header eth type
2273 */
2274void edma_assign_ath_hdr_type(int eth_type)
2275{
2276 edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
2277}
2278
2279/* edma_get_default_vlan_tag()
2280 * Used by other modules to get the default vlan tag
2281 */
2282int edma_get_default_vlan_tag(struct net_device *netdev)
2283{
2284 struct edma_adapter *adapter = netdev_priv(netdev);
2285
2286 if (adapter->default_vlan_tag)
2287 return adapter->default_vlan_tag;
2288
2289 return 0;
2290}
2291
2292/* edma_open()
2293 * Gets called when the netdevice is brought up; starts the queues.
2294 */
2295int edma_open(struct net_device *netdev)
2296{
2297 struct edma_adapter *adapter = netdev_priv(netdev);
2298 struct platform_device *pdev = adapter->edma_cinfo->pdev;
2299
2300 netif_tx_start_all_queues(netdev);
2301 edma_initialise_rfs_flow_table(adapter);
2302 set_bit(__EDMA_UP, &adapter->state_flags);
2303
2304 /* If link polling is enabled (in our case only for WAN interfaces),
2305 * start the PHY; otherwise force the link state to UP.
2306 */
Rakesh Naired29f6b2017-04-04 15:48:08 +05302307 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302308 if (adapter->poll_required) {
2309 if (!IS_ERR(adapter->phydev)) {
2310 phy_start(adapter->phydev);
2311 phy_start_aneg(adapter->phydev);
2312 adapter->link_state = __EDMA_LINKDOWN;
2313 } else {
2314 dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
2315 }
2316 } else {
2317 adapter->link_state = __EDMA_LINKUP;
2318 netif_carrier_on(netdev);
2319 }
Rakesh Naired29f6b2017-04-04 15:48:08 +05302320 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302321
2322 return 0;
2323}
2324
2325
2326/* edma_close()
2327 * Gets called when the netdevice is brought down; stops the queues.
2328 */
2329int edma_close(struct net_device *netdev)
2330{
2331 struct edma_adapter *adapter = netdev_priv(netdev);
2332
2333 edma_free_rfs_flow_table(adapter);
2334 netif_carrier_off(netdev);
2335 netif_tx_stop_all_queues(netdev);
2336
Rakesh Naired29f6b2017-04-04 15:48:08 +05302337 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302338 if (adapter->poll_required) {
2339 if (!IS_ERR(adapter->phydev))
2340 phy_stop(adapter->phydev);
2341 }
Rakesh Naired29f6b2017-04-04 15:48:08 +05302342 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302343
2344 adapter->link_state = __EDMA_LINKDOWN;
2345
2346 /* Clear the GMAC UP state before the link state is checked
2347 */
2348 clear_bit(__EDMA_UP, &adapter->state_flags);
2349
2350 return 0;
2351}
2352
2353/* edma_poll()
2354 * Polling function invoked when the NAPI context is scheduled.
2355 *
2356 * Main sequence of tasks performed here:
2357 * read irq status -> clean tx queues -> clean rx queues ->
2358 * acknowledge irq status -> re-enable interrupts.
2359 */
2360int edma_poll(struct napi_struct *napi, int budget)
2361{
2362 struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
2363 struct edma_per_cpu_queues_info, napi);
2364 struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2365 u32 reg_data;
2366 u32 shadow_rx_status, shadow_tx_status;
2367 int queue_id;
2368 int i, work_done = 0;
Rakesh Nair03b586c2017-04-03 18:28:58 +05302369 u16 rx_pending_fill;
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302370
2371 /* Store the Rx/Tx status by ANDing it with
2372 * appropriate per-CPU RX/TX mask
2373 */
2374 edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
2375 edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
2376 shadow_rx_status = edma_percpu_info->rx_status;
2377 edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
2378 edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
2379 shadow_tx_status = edma_percpu_info->tx_status;
2380
2381 /* Every core has a tx_start, computed in probe and stored in
2382 * edma_percpu_info->tx_start. tx_status was masked above with
2383 * tx_mask, so it holds only the status bits of the four tx
2384 * queues owned by the core doing the current processing. The
2385 * loop runs until every pending tx queue on this core has been
2386 * cleaned.
2387 */
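 /* ffs() returns the 1-based index of the lowest set bit, so pending
  * queues are serviced in ascending queue-id order.
  */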
2388 while (edma_percpu_info->tx_status) {
2389 queue_id = ffs(edma_percpu_info->tx_status) - 1;
2390 edma_tx_complete(edma_cinfo, queue_id);
2391 edma_percpu_info->tx_status &= ~(1 << queue_id);
2392 }
2393
2394 /* Every core has an rx_start, computed in probe and stored in
2395 * edma_percpu_info->rx_start. rx_status was masked above with
2396 * rx_mask, so it holds only the status bits of the rx queues
2397 * owned by the core doing the current processing. The loop
2398 * runs until every pending rx queue on this core has been
2399 * cleaned.
2400 */
2401 while (edma_percpu_info->rx_status) {
2402 queue_id = ffs(edma_percpu_info->rx_status) - 1;
Rakesh Nair03b586c2017-04-03 18:28:58 +05302403 rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302404 budget, queue_id, napi);
2405
Rakesh Nair03b586c2017-04-03 18:28:58 +05302406 if (likely(work_done < budget)) {
2407 if (rx_pending_fill) {
2408 work_done = budget;
2409 break;
2410 }
2411
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302412 edma_percpu_info->rx_status &= ~(1 << queue_id);
Rakesh Nair03b586c2017-04-03 18:28:58 +05302413 }
Rakesh Nair9bcf2602017-01-06 16:02:16 +05302414 else
2415 break;
2416 }
2417
2418 /* Clear the status registers so that the serviced interrupts do
2419 * not fire again. This is done here because the status registers
2420 * must only be written once the producer/consumer indices have
2421 * been updated to reflect that packet transmission/reception
2422 * completed successfully.
2423 */
2424 edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
2425 edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
2426
2427 /* If budget not fully consumed, exit the polling mode */
2428 if (likely(work_done < budget)) {
2429 napi_complete(napi);
2430
2431 /* re-enable the interrupts */
2432 for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2433 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
2434 for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2435 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
2436 }
2437
2438 return work_done;
2439}
2440
2441/* edma_interrupt()
2442 * interrupt handler
2443 */
2444irqreturn_t edma_interrupt(int irq, void *dev)
2445{
2446 struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
2447 struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2448 int i;
2449
2450 /* Mask this core's TX/RX interrupts; the NAPI poll re-enables them */
2451 for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2452 edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
2453
2454 for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2455 edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);
2456
2457 napi_schedule(&edma_percpu_info->napi);
2458
2459 return IRQ_HANDLED;
2460}