Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 1 | /* |
Manish Verma | 924d3ed | 2020-01-07 12:01:36 +0530 | [diff] [blame^] | 2 | * Copyright (c) 2014 - 2018, 2020 The Linux Foundation. All rights reserved. |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all copies. |
| 7 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
| 8 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
| 9 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
| 10 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
| 11 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
| 12 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT |
| 13 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
| 14 | */ |
| 15 | |
| 16 | #include <linux/platform_device.h> |
| 17 | #include <linux/if_vlan.h> |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 18 | #include <linux/kernel.h> |
Rakesh Nair | c402d75 | 2018-01-20 16:21:54 +0530 | [diff] [blame] | 19 | #include <linux/atomic.h> |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 20 | #include "ess_edma.h" |
| 21 | #include "edma.h" |
| 22 | |
| 23 | extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED]; |
Rakesh Nair | d4a1150 | 2017-11-07 17:02:11 +0530 | [diff] [blame] | 24 | extern u32 edma_disable_queue_stop; |
| 25 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 26 | bool edma_stp_rstp; |
Sourav Poddar | 44858a7 | 2019-12-09 22:51:30 +0530 | [diff] [blame] | 27 | bool edma_jumbo_multi_segment; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 28 | u16 edma_ath_eth_type; |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 29 | extern u8 edma_dscp2ac_tbl[EDMA_PRECEDENCE_MAX]; |
| 30 | extern u8 edma_per_prec_stats_enable; |
Rakesh Nair | 1c6a18c | 2017-08-02 21:27:06 +0530 | [diff] [blame] | 31 | extern u32 edma_iad_stats_enable; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 32 | |
| 33 | /* edma_skb_priority_offset() |
| 34 | * Map skb->priority to a Tx queue offset (0 or 1) |
| 35 | */ |
| 36 | static unsigned int edma_skb_priority_offset(struct sk_buff *skb) |
| 37 | { |
| 38 | return (skb->priority >> 2) & 1; |
| 39 | } |
| 40 | |
| 41 | /* edma_alloc_tx_ring() |
| 42 | * Allocate Tx descriptors ring |
| 43 | */ |
| 44 | static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo, |
| 45 | struct edma_tx_desc_ring *etdr) |
| 46 | { |
| 47 | struct platform_device *pdev = edma_cinfo->pdev; |
| 48 | u16 sw_size = sizeof(struct edma_sw_desc) * etdr->count; |
| 49 | |
| 50 | /* Initialize ring */ |
| 51 | etdr->size = sizeof(struct edma_tx_desc) * etdr->count; |
| 52 | etdr->sw_next_to_fill = 0; |
| 53 | etdr->sw_next_to_clean = 0; |
| 54 | |
| 55 | /* Allocate SW descriptors */ |
| 56 | etdr->sw_desc = vzalloc(sw_size); |
| 57 | if (!etdr->sw_desc) { |
| 58 | dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr); |
| 59 | return -ENOMEM; |
| 60 | } |
| 61 | |
| 62 | /* Allocate HW descriptors */ |
| 63 | etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma, |
| 64 | GFP_KERNEL); |
| 65 | if (!etdr->hw_desc) { |
| 66 | dev_err(&pdev->dev, "descriptor allocation for tx ring failed"); |
| 67 | vfree(etdr->sw_desc); |
| 68 | etdr->sw_desc = NULL; |
| 69 | return -ENOMEM; |
| 70 | } |
| 71 | |
| 72 | return 0; |
| 73 | } |
| 74 | |
| 75 | /* edma_free_tx_ring() |
| 76 | * Free Tx ring allocated by edma_alloc_tx_ring() |
| 77 | */ |
| 78 | static void edma_free_tx_ring(struct edma_common_info *edma_cinfo, |
| 79 | struct edma_tx_desc_ring *etdr) |
| 80 | { |
| 81 | struct platform_device *pdev = edma_cinfo->pdev; |
| 82 | |
| 83 | if (likely(etdr->hw_desc)) { |
| 84 | dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc, |
| 85 | etdr->dma); |
| 86 | |
| 87 | vfree(etdr->sw_desc); |
| 88 | etdr->sw_desc = NULL; |
| 89 | } |
| 90 | } |
| 91 | |
| 92 | /* edma_alloc_rx_ring() |
| 93 | * allocate rx descriptor ring |
| 94 | */ |
| 95 | static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo, |
| 96 | struct edma_rfd_desc_ring *erxd) |
| 97 | { |
| 98 | struct platform_device *pdev = edma_cinfo->pdev; |
| 99 | u16 sw_size = sizeof(struct edma_sw_desc) * erxd->count; |
| 100 | |
| 101 | erxd->size = sizeof(struct edma_sw_desc) * erxd->count; |
| 102 | erxd->sw_next_to_fill = 0; |
| 103 | erxd->sw_next_to_clean = 0; |
| 104 | |
| 105 | /* Allocate SW descriptors */ |
| 106 | erxd->sw_desc = vzalloc(sw_size); |
| 107 | if (!erxd->sw_desc) |
| 108 | return -ENOMEM; |
| 109 | |
| 110 | /* Alloc HW descriptors */ |
| 111 | erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma, |
| 112 | GFP_KERNEL); |
| 113 | if (!erxd->hw_desc) { |
| 114 | vfree(erxd->sw_desc); |
| 115 | erxd->sw_desc = NULL; |
| 116 | return -ENOMEM; |
| 117 | } |
| 118 | |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 119 | /* Initialize pending fill */ |
| 120 | erxd->pending_fill = 0; |
| 121 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 122 | return 0; |
| 123 | } |
| 124 | |
| 125 | /* edma_free_rx_ring() |
| 126 | * Free Rx ring allocated by edma_alloc_rx_ring() |
| 127 | */ |
| 128 | static void edma_free_rx_ring(struct edma_common_info *edma_cinfo, |
| 129 | struct edma_rfd_desc_ring *erxd) |
| 130 | { |
| 131 | struct platform_device *pdev = edma_cinfo->pdev; |
| 132 | |
| 133 | if (likely(erxd->hw_desc)) { |
| 134 | dma_free_coherent(&pdev->dev, erxd->size, erxd->hw_desc, |
| 135 | erxd->dma); |
| 136 | |
| 137 | vfree(erxd->sw_desc); |
| 138 | erxd->sw_desc = NULL; |
| 139 | } |
| 140 | } |
| 141 | |
| 142 | /* edma_configure_tx() |
| 143 | * Configure transmission control data |
| 144 | */ |
| 145 | static void edma_configure_tx(struct edma_common_info *edma_cinfo) |
| 146 | { |
| 147 | u32 txq_ctrl_data; |
| 148 | |
| 149 | txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT); |
| 150 | txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN; |
| 151 | txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT); |
| 152 | edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data); |
| 153 | } |
| 154 | |
| 155 | /* edma_configure_rx() |
| 156 | * configure reception control data |
| 157 | */ |
| 158 | static void edma_configure_rx(struct edma_common_info *edma_cinfo) |
| 159 | { |
| 160 | struct edma_hw *hw = &edma_cinfo->hw; |
| 161 | u32 rss_type, rx_desc1, rxq_ctrl_data; |
| 162 | |
| 163 | /* Set RSS type */ |
| 164 | rss_type = hw->rss_type; |
| 165 | edma_write_reg(EDMA_REG_RSS_TYPE, rss_type); |
| 166 | |
| 167 | /* Set RFD burst number */ |
| 168 | rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT); |
| 169 | |
| 170 | /* Set RFD prefetch threshold */ |
| 171 | rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT); |
| 172 | |
| 173 | /* Set RFD low threshold in the host ring to generate an interrupt */ |
| 174 | rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT); |
| 175 | edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1); |
| 176 | |
| 177 | /* Set the Rx FIFO threshold at which DMA of data to the host starts */ |
| 178 | rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE; |
| 179 | |
| 180 | /* Set RX remove vlan bit */ |
| 181 | rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN; |
| 182 | |
| 183 | edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data); |
| 184 | } |
| 185 | |
| 186 | /* edma_alloc_rx_buf() |
| 187 | * Allocate skbs (or pages) for received packets and refill the RFD ring. |
| 188 | */ |
| 189 | static int edma_alloc_rx_buf(struct edma_common_info |
| 190 | *edma_cinfo, |
| 191 | struct edma_rfd_desc_ring *erdr, |
| 192 | int cleaned_count, int queue_id) |
| 193 | { |
| 194 | struct platform_device *pdev = edma_cinfo->pdev; |
| 195 | struct edma_rx_free_desc *rx_desc; |
| 196 | struct edma_sw_desc *sw_desc; |
| 197 | struct sk_buff *skb; |
| 198 | unsigned int i; |
Sourav Poddar | 44858a7 | 2019-12-09 22:51:30 +0530 | [diff] [blame] | 199 | u16 prod_idx, length, alloc_length; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 200 | u32 reg_data; |
| 201 | |
Sourav Poddar | 44858a7 | 2019-12-09 22:51:30 +0530 | [diff] [blame] | 202 | BUG_ON(cleaned_count > erdr->count); |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 203 | |
| 204 | i = erdr->sw_next_to_fill; |
| 205 | |
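| | /* Refill up to cleaned_count RFD slots, reusing skbs flagged for reuse and allocating new ones otherwise */ |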
| 206 | while (cleaned_count) { |
| 207 | sw_desc = &erdr->sw_desc[i]; |
| 208 | length = edma_cinfo->rx_head_buffer_len; |
| 209 | |
| 210 | if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) { |
| 211 | skb = sw_desc->skb; |
| 212 | |
| 213 | /* Clear REUSE flag */ |
| 214 | sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE; |
| 215 | } else { |
| 216 | /* alloc skb */ |
Sourav Poddar | 44858a7 | 2019-12-09 22:51:30 +0530 | [diff] [blame] | 217 | alloc_length = length; |
| 218 | |
| 219 | /* |
| 220 | * If jumbo multi-segment is enabled, we need to ensure that |
| 221 | * every packet buffer crosses a 1K address boundary |
| 222 | */ |
| 223 | if (unlikely(edma_jumbo_multi_segment && !edma_cinfo->page_mode)) |
| 224 | alloc_length += 0x400; |
| 225 | |
| 226 | skb = netdev_alloc_skb(edma_netdev[0], alloc_length); |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 227 | if (!skb) { |
| 228 | /* Better luck next round */ |
| 229 | sw_desc->flags = 0; |
| 230 | break; |
| 231 | } |
| 232 | } |
| 233 | |
| 234 | if (!edma_cinfo->page_mode) { |
Sourav Poddar | 44858a7 | 2019-12-09 22:51:30 +0530 | [diff] [blame] | 235 | /* |
| 236 | * If jumbo multi-segment is enabled, we need to ensure that |
| 237 | * every packet buffer crosses a 1K address boundary |
| 238 | */ |
| 239 | if (unlikely(edma_jumbo_multi_segment)) { |
| 240 | u32 addr = (u32)skb->data; |
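| | /* Move skb->data to 32 bytes before the next 1K boundary */ |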
| 241 | skb->data = (u8 *)(((addr + 0x400) & ~0x3FF) - 32); |
| 242 | } |
| 243 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 244 | sw_desc->dma = dma_map_single(&pdev->dev, skb->data, |
| 245 | length, DMA_FROM_DEVICE); |
Sourav Poddar | 44858a7 | 2019-12-09 22:51:30 +0530 | [diff] [blame] | 246 | if (dma_mapping_error(&pdev->dev, sw_desc->dma)) { |
| 247 | WARN_ONCE(0, "EDMA DMA mapping failed for linear address %x", sw_desc->dma); |
| 248 | sw_desc->flags = 0; |
| 249 | sw_desc->skb = NULL; |
| 250 | dev_kfree_skb_any(skb); |
| 251 | break; |
| 252 | } |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 253 | |
Sourav Poddar | 44858a7 | 2019-12-09 22:51:30 +0530 | [diff] [blame] | 254 | /* |
| 255 | * We should not exit from here with REUSE flag set |
| 256 | * This is to avoid re-using the same sk_buff on the |
| 257 | * next iteration |
| 258 | */ |
| 259 | sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD; |
| 260 | sw_desc->length = length; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 261 | } else { |
| 262 | struct page *pg = alloc_page(GFP_ATOMIC); |
| 263 | |
| 264 | if (!pg) { |
| 265 | sw_desc->flags = 0; |
| 266 | sw_desc->skb = NULL; |
| 267 | dev_kfree_skb_any(skb); |
| 268 | break; |
| 269 | } |
| 270 | |
| 271 | sw_desc->dma = dma_map_page(&pdev->dev, pg, 0, |
| 272 | edma_cinfo->rx_page_buffer_len, |
| 273 | DMA_FROM_DEVICE); |
| 274 | if (dma_mapping_error(&pdev->dev, sw_desc->dma)) { |
| 275 | WARN_ONCE(0, "EDMA DMA mapping failed for page address %x", sw_desc->dma); |
| 276 | sw_desc->flags = 0; |
| 277 | sw_desc->skb = NULL; |
| 278 | __free_page(pg); |
| 279 | dev_kfree_skb_any(skb); |
| 280 | break; |
| 281 | } |
| 282 | |
| 283 | skb_fill_page_desc(skb, 0, pg, 0, |
| 284 | edma_cinfo->rx_page_buffer_len); |
| 285 | sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG; |
| 286 | sw_desc->length = edma_cinfo->rx_page_buffer_len; |
| 287 | } |
| 288 | |
| 289 | /* Update the buffer info */ |
| 290 | sw_desc->skb = skb; |
| 291 | rx_desc = (&(erdr->hw_desc)[i]); |
| 292 | rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma); |
| 293 | if (++i == erdr->count) |
| 294 | i = 0; |
| 295 | cleaned_count--; |
| 296 | } |
| 297 | |
| 298 | erdr->sw_next_to_fill = i; |
| 299 | |
| 300 | if (i == 0) |
| 301 | prod_idx = erdr->count - 1; |
| 302 | else |
| 303 | prod_idx = i - 1; |
| 304 | |
| 305 | /* Update the producer index */ |
| 306 | edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), ®_data); |
| 307 | reg_data &= ~EDMA_RFD_PROD_IDX_BITS; |
| 308 | reg_data |= prod_idx; |
| 309 | edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data); |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 310 | |
| 311 | /* If we couldn't allocate all the buffers, |
| 312 | * we increment the alloc failure counter |
| 313 | */ |
| 314 | if (cleaned_count) |
| 315 | edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++; |
| 316 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 317 | return cleaned_count; |
| 318 | } |
| 319 | |
| 320 | /* edma_init_desc() |
| 321 | * Update descriptor ring size, buffer size and producer/consumer indices |
| 322 | */ |
| 323 | static void edma_init_desc(struct edma_common_info *edma_cinfo) |
| 324 | { |
| 325 | struct edma_rfd_desc_ring *rfd_ring; |
| 326 | struct edma_tx_desc_ring *etdr; |
| 327 | int i = 0, j = 0; |
| 328 | u32 data = 0; |
| 329 | u16 hw_cons_idx = 0; |
| 330 | |
| 331 | /* Set the base address of every TPD ring. */ |
| 332 | for (i = 0; i < edma_cinfo->num_tx_queues; i++) { |
| 333 | etdr = edma_cinfo->tpd_ring[i]; |
| 334 | |
| 335 | /* Update descriptor ring base address */ |
| 336 | edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma); |
| 337 | edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data); |
| 338 | |
| 339 | /* Calculate hardware consumer index */ |
| 340 | hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff; |
| 341 | etdr->sw_next_to_fill = hw_cons_idx; |
| 342 | etdr->sw_next_to_clean = hw_cons_idx; |
| 343 | data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT); |
| 344 | data |= hw_cons_idx; |
| 345 | |
| 346 | /* update producer index */ |
| 347 | edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data); |
| 348 | |
| 349 | /* update SW consumer index register */ |
| 350 | edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx); |
| 351 | |
| 352 | /* Set TPD ring size */ |
| 353 | edma_write_reg(EDMA_REG_TPD_RING_SIZE, |
| 354 | edma_cinfo->tx_ring_count & |
| 355 | EDMA_TPD_RING_SIZE_MASK); |
| 356 | } |
| 357 | |
| 358 | for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { |
| 359 | rfd_ring = edma_cinfo->rfd_ring[j]; |
| 360 | /* Update Receive Free descriptor ring base address */ |
| 361 | edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j), |
| 362 | (u32)(rfd_ring->dma)); |
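| | /* With 4 Rx queues, ring base addresses go into every other queue register; otherwise into consecutive ones */ |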
| 363 | j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); |
| 364 | } |
| 365 | |
| 366 | data = edma_cinfo->rx_head_buffer_len; |
| 367 | if (edma_cinfo->page_mode) |
| 368 | data = edma_cinfo->rx_page_buffer_len; |
| 369 | |
| 370 | data &= EDMA_RX_BUF_SIZE_MASK; |
| 371 | data <<= EDMA_RX_BUF_SIZE_SHIFT; |
| 372 | |
| 373 | /* Update RFD ring size and RX buffer size */ |
| 374 | data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK) |
| 375 | << EDMA_RFD_RING_SIZE_SHIFT; |
| 376 | |
| 377 | edma_write_reg(EDMA_REG_RX_DESC0, data); |
| 378 | |
| 379 | /* Disable TX FIFO low watermark and high watermark */ |
| 380 | edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0); |
| 381 | |
| 382 | /* Load all of the base addresses programmed above */ |
| 383 | edma_read_reg(EDMA_REG_TX_SRAM_PART, &data); |
| 384 | data |= 1 << EDMA_LOAD_PTR_SHIFT; |
| 385 | edma_write_reg(EDMA_REG_TX_SRAM_PART, data); |
| 386 | } |
| 387 | |
| 388 | /* edma_receive_checksum |
| 389 | * API to check the checksum on received packets |
| 390 | */ |
| 391 | static void edma_receive_checksum(struct edma_rx_return_desc *rd, |
| 392 | struct sk_buff *skb) |
| 393 | { |
| 394 | skb_checksum_none_assert(skb); |
| 395 | |
| 396 | /* Check the RRD IP/L4 checksum-fail bit; if |
| 397 | * it is set, the hardware reported a checksum |
| 398 | * failure. |
| 399 | */ |
| 400 | if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK) |
| 401 | return; |
| 402 | |
Rakesh Nair | 72e1d28 | 2017-05-19 22:21:01 +0530 | [diff] [blame] | 403 | /* |
| 404 | * Mark the checksum as already verified only if |
| 405 | * we have a TCP/UDP packet |
| 406 | */ |
| 407 | if (rd->rrd7 & (EDMA_RRD_L4OFFSET_MASK << EDMA_RRD_L4OFFSET_SHIFT)) |
| 408 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 409 | } |
| 410 | |
| 411 | /* edma_clean_rfd() |
| 412 | * Clean up Rx resources on error |
| 413 | */ |
| 414 | static void edma_clean_rfd(struct platform_device *pdev, |
| 415 | struct edma_rfd_desc_ring *erdr, |
| 416 | u16 index, |
| 417 | int pos) |
| 418 | { |
| 419 | struct edma_rx_free_desc *rx_desc = &(erdr->hw_desc[index]); |
| 420 | struct edma_sw_desc *sw_desc = &erdr->sw_desc[index]; |
| 421 | |
| 422 | /* Unmap non-first RFD positions in packet */ |
| 423 | if (pos) { |
| 424 | if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) |
| 425 | dma_unmap_single(&pdev->dev, sw_desc->dma, |
| 426 | sw_desc->length, DMA_FROM_DEVICE); |
| 427 | else |
| 428 | dma_unmap_page(&pdev->dev, sw_desc->dma, |
| 429 | sw_desc->length, DMA_FROM_DEVICE); |
| 430 | } |
| 431 | |
| 432 | if (sw_desc->skb) { |
| 433 | dev_kfree_skb_any(sw_desc->skb); |
| 434 | sw_desc->skb = NULL; |
| 435 | } |
| 436 | |
| 437 | sw_desc->flags = 0; |
| 438 | memset(rx_desc, 0, sizeof(struct edma_rx_free_desc)); |
| 439 | } |
| 440 | |
| 441 | /* edma_rx_complete_stp_rstp() |
| 442 | * Complete Rx processing for STP RSTP packets |
| 443 | */ |
| 444 | static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd) |
| 445 | { |
| 446 | int i; |
| 447 | u32 priority; |
| 448 | u16 port_type; |
| 449 | u8 mac_addr[EDMA_ETH_HDR_LEN]; |
| 450 | |
| 451 | port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT) |
| 452 | & EDMA_RRD_PORT_TYPE_MASK; |
| 453 | /* Proceed with the STP/RSTP handling only if |
| 454 | * the port type is 0x4 |
| 455 | */ |
| 456 | if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) { |
| 457 | u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; |
| 458 | |
| 459 | /* calculate the frame priority */ |
| 460 | priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT) |
| 461 | & EDMA_RRD_PRIORITY_MASK; |
| 462 | |
| 463 | for (i = 0; i < EDMA_ETH_HDR_LEN; i++) |
| 464 | mac_addr[i] = skb->data[i]; |
| 465 | |
| 466 | /* Check if the destination MAC address is the BPDU address */ |
| 467 | if (!memcmp(mac_addr, bpdu_mac, 6)) { |
| 468 | /* The destination MAC address is the BPDU |
| 469 | * address, so insert the Atheros header |
| 470 | * into the packet. |
| 471 | */ |
| 472 | u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) | |
| 473 | (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) | |
| 474 | (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id; |
| 475 | skb_push(skb, 4); |
| 476 | memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN); |
| 477 | *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type); |
| 478 | *(uint16_t *)&skb->data[14] = htons(athr_hdr); |
| 479 | } |
| 480 | } |
| 481 | } |
| 482 | |
| 483 | /* edma_rx_complete_fraglist() |
| 484 | * Complete Rx processing for fraglist skbs |
| 485 | */ |
| 486 | static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean, |
| 487 | struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo) |
| 488 | { |
| 489 | struct platform_device *pdev = edma_cinfo->pdev; |
| 490 | struct edma_hw *hw = &edma_cinfo->hw; |
| 491 | struct sk_buff *skb_temp; |
| 492 | struct edma_sw_desc *sw_desc; |
| 493 | int i; |
| 494 | u16 size_remaining; |
| 495 | |
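| | /* The head buffer carries (rx_head_buff_size - 16) bytes of data after the 16-byte RRD; the rest arrives in the following RFDs */ |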
| 496 | skb->data_len = 0; |
| 497 | skb->tail += (hw->rx_head_buff_size - 16); |
| 498 | skb->len = skb->truesize = length; |
| 499 | size_remaining = length - (hw->rx_head_buff_size - 16); |
| 500 | |
| 501 | /* clean-up all related sw_descs */ |
| 502 | for (i = 1; i < num_rfds; i++) { |
| 503 | struct sk_buff *skb_prev; |
| 504 | |
| 505 | sw_desc = &erdr->sw_desc[sw_next_to_clean]; |
| 506 | skb_temp = sw_desc->skb; |
| 507 | |
| 508 | dma_unmap_single(&pdev->dev, sw_desc->dma, |
| 509 | sw_desc->length, DMA_FROM_DEVICE); |
| 510 | |
| 511 | if (size_remaining < hw->rx_head_buff_size) |
| 512 | skb_put(skb_temp, size_remaining); |
| 513 | else |
| 514 | skb_put(skb_temp, hw->rx_head_buff_size); |
| 515 | |
| 516 | /* For the first additional RFD, link its skb to |
| 517 | * the head skb via frag_list; later RFDs are |
| 518 | * chained via ->next |
| 519 | */ |
| 520 | if (i == 1) |
| 521 | skb_shinfo(skb)->frag_list = skb_temp; |
| 522 | else |
| 523 | skb_prev->next = skb_temp; |
| 524 | skb_prev = skb_temp; |
| 525 | skb_temp->next = NULL; |
| 526 | |
| 527 | skb->data_len += skb_temp->len; |
| 528 | size_remaining -= skb_temp->len; |
| 529 | |
| 530 | /* Increment SW index */ |
| 531 | sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); |
| 532 | } |
| 533 | |
| 534 | return sw_next_to_clean; |
| 535 | } |
| 536 | |
| 537 | /* edma_rx_complete_paged() |
| 538 | * Complete Rx processing for paged skbs |
| 539 | */ |
| 540 | static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, |
| 541 | u16 length, u32 sw_next_to_clean, |
| 542 | struct edma_rfd_desc_ring *erdr, |
| 543 | struct edma_common_info *edma_cinfo) |
| 544 | { |
| 545 | struct platform_device *pdev = edma_cinfo->pdev; |
| 546 | struct sk_buff *skb_temp; |
| 547 | struct edma_sw_desc *sw_desc; |
| 548 | int i; |
| 549 | u16 size_remaining; |
| 550 | |
| 551 | skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; |
| 552 | |
| 553 | /* Setup skbuff fields */ |
| 554 | skb->len = length; |
| 555 | |
| 556 | if (likely(num_rfds <= 1)) { |
| 557 | skb->data_len = length; |
| 558 | skb->truesize += edma_cinfo->rx_page_buffer_len; |
| 559 | skb_fill_page_desc(skb, 0, skb_frag_page(frag), |
| 560 | 16, length); |
| 561 | } else { |
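| | /* The first fragment starts after the 16-byte RRD, hence the 16-byte trim and offset below */ |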
| 562 | frag->size -= 16; |
| 563 | skb->data_len = frag->size; |
| 564 | skb->truesize += edma_cinfo->rx_page_buffer_len; |
| 565 | size_remaining = length - frag->size; |
| 566 | |
| 567 | skb_fill_page_desc(skb, 0, skb_frag_page(frag), |
| 568 | 16, frag->size); |
| 569 | |
| 570 | /* clean-up all related sw_descs */ |
| 571 | for (i = 1; i < num_rfds; i++) { |
| 572 | sw_desc = &erdr->sw_desc[sw_next_to_clean]; |
| 573 | skb_temp = sw_desc->skb; |
| 574 | frag = &skb_shinfo(skb_temp)->frags[0]; |
| 575 | dma_unmap_page(&pdev->dev, sw_desc->dma, |
| 576 | sw_desc->length, DMA_FROM_DEVICE); |
| 577 | |
| 578 | if (size_remaining < edma_cinfo->rx_page_buffer_len) |
| 579 | frag->size = size_remaining; |
| 580 | |
| 581 | skb_fill_page_desc(skb, i, skb_frag_page(frag), |
| 582 | 0, frag->size); |
| 583 | |
| 584 | /* We used frag pages from skb_temp in skb */ |
| 585 | skb_shinfo(skb_temp)->nr_frags = 0; |
| 586 | dev_kfree_skb_any(skb_temp); |
| 587 | |
| 588 | skb->data_len += frag->size; |
| 589 | skb->truesize += edma_cinfo->rx_page_buffer_len; |
| 590 | size_remaining -= frag->size; |
| 591 | |
| 592 | /* Increment SW index */ |
| 593 | sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); |
| 594 | } |
| 595 | } |
| 596 | |
| 597 | return sw_next_to_clean; |
| 598 | } |
| 599 | |
| 600 | /* |
| 601 | * edma_rx_complete() |
| 602 | * Main API called from the NAPI poll function to process Rx packets. |
| 603 | */ |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 604 | static u16 edma_rx_complete(struct edma_common_info *edma_cinfo, |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 605 | int *work_done, int work_to_do, int queue_id, |
| 606 | struct napi_struct *napi) |
| 607 | { |
| 608 | struct platform_device *pdev = edma_cinfo->pdev; |
| 609 | struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id]; |
| 610 | u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1, |
| 611 | sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0; |
| 612 | u32 data = 0; |
| 613 | u16 count = erdr->count, rfd_avail; |
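| | /* Map the 8 hardware Rx queues to the 4 receive queue ids recorded for RFS/RPS (two HW queues per id) */ |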
| 614 | u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3}; |
| 615 | |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 616 | cleaned_count = erdr->pending_fill; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 617 | sw_next_to_clean = erdr->sw_next_to_clean; |
| 618 | |
| 619 | edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data); |
| 620 | hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) & |
| 621 | EDMA_RFD_CONS_IDX_MASK; |
| 622 | |
| 623 | do { |
| 624 | while (sw_next_to_clean != hw_next_to_clean) { |
| 625 | struct net_device *netdev; |
| 626 | struct edma_adapter *adapter; |
| 627 | struct edma_sw_desc *sw_desc; |
| 628 | struct sk_buff *skb; |
| 629 | struct edma_rx_return_desc *rd; |
| 630 | u8 *vaddr; |
| 631 | int port_id, i, drop_count = 0; |
| 632 | u32 priority; |
| 633 | |
| 634 | if (!work_to_do) |
| 635 | break; |
| 636 | |
| 637 | sw_desc = &erdr->sw_desc[sw_next_to_clean]; |
| 638 | skb = sw_desc->skb; |
| 639 | |
| 640 | /* Get RRD */ |
| 641 | if (!edma_cinfo->page_mode) { |
| 642 | dma_unmap_single(&pdev->dev, sw_desc->dma, |
| 643 | sw_desc->length, DMA_FROM_DEVICE); |
| 644 | rd = (struct edma_rx_return_desc *)skb->data; |
| 645 | |
| 646 | } else { |
| 647 | dma_unmap_page(&pdev->dev, sw_desc->dma, |
| 648 | sw_desc->length, DMA_FROM_DEVICE); |
| 649 | vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0])); |
| 650 | memcpy((uint8_t *)&rrd[0], vaddr, 16); |
| 651 | rd = (struct edma_rx_return_desc *)rrd; |
| 652 | kunmap_atomic(vaddr); |
| 653 | } |
| 654 | |
| 655 | /* Check if RRD is valid */ |
| 656 | if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) { |
| 657 | dev_err(&pdev->dev, "RRD DESC valid bit not set"); |
| 658 | edma_clean_rfd(pdev, erdr, sw_next_to_clean, 0); |
| 659 | sw_next_to_clean = (sw_next_to_clean + 1) & |
| 660 | (erdr->count - 1); |
| 661 | cleaned_count++; |
| 662 | continue; |
| 663 | } |
| 664 | |
| 665 | /* Get the number of RFDs from RRD */ |
| 666 | num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK; |
| 667 | |
| 668 | /* Get Rx port ID from switch */ |
| 669 | port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK; |
| 670 | if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) { |
| 671 | if (net_ratelimit()) { |
| 672 | dev_err(&pdev->dev, "Incorrect RRD source port bit set"); |
| 673 | dev_err(&pdev->dev, |
| 674 | "RRD Dump\n rrd0:%x rrd1: %x rrd2: %x rrd3: %x rrd4: %x rrd5: %x rrd6: %x rrd7: %x", |
| 675 | rd->rrd0, rd->rrd1, rd->rrd2, rd->rrd3, rd->rrd4, rd->rrd5, rd->rrd6, rd->rrd7); |
| 676 | dev_err(&pdev->dev, "Num_rfds: %d, src_port: %d, pkt_size: %d, cvlan_tag: %d\n", |
| 677 | num_rfds, rd->rrd1 & EDMA_RRD_SRC_PORT_NUM_MASK, |
| 678 | rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK, rd->rrd7 & EDMA_RRD_CVLAN); |
| 679 | } |
| 680 | for (i = 0; i < num_rfds; i++) { |
| 681 | edma_clean_rfd(pdev, erdr, sw_next_to_clean, i); |
| 682 | sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); |
| 683 | } |
| 684 | |
| 685 | cleaned_count += num_rfds; |
| 686 | continue; |
| 687 | } |
| 688 | |
| 689 | netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id]; |
| 690 | if (!netdev) { |
| 691 | dev_err(&pdev->dev, "Invalid netdev"); |
| 692 | for (i = 0; i < num_rfds; i++) { |
| 693 | edma_clean_rfd(pdev, erdr, sw_next_to_clean, i); |
| 694 | sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); |
| 695 | } |
| 696 | |
| 697 | cleaned_count += num_rfds; |
| 698 | continue; |
| 699 | } |
| 700 | adapter = netdev_priv(netdev); |
| 701 | |
| 702 | /* This code handles a use case where a high |
| 703 | * priority stream and a low priority stream are |
| 704 | * received simultaneously on the DUT. The problem occurs |
| 705 | * if one of the Rx rings is full and the corresponding |
| 706 | * core is busy with other work. This causes the ESS CPU |
| 707 | * port to backpressure all incoming traffic, including |
| 708 | * the high priority stream. We monitor the free descriptor |
| 709 | * count on each CPU and, whenever it drops below the |
| 710 | * threshold (< 80), we drop all low priority traffic and |
| 711 | * let only high priority traffic pass through. This avoids |
| 712 | * the ESS CPU port applying backpressure to the high |
| 713 | * priority stream. |
| 714 | */ |
| 715 | priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT) |
| 716 | & EDMA_RRD_PRIORITY_MASK; |
Bhaskar Valaboju | e0efb01 | 2018-02-27 13:48:12 +0530 | [diff] [blame] | 717 | if (likely((priority <= edma_cinfo->rx_low_priority) && !edma_cinfo->page_mode && (num_rfds <= 1))) { |
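| | /* Free RFD slots left in the ring, computed modulo the ring size */ |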
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 718 | rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1); |
| 719 | if (rfd_avail < EDMA_RFD_AVAIL_THR) { |
| 720 | sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_REUSE; |
| 721 | sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); |
| 722 | adapter->stats.rx_dropped++; |
| 723 | cleaned_count++; |
| 724 | drop_count++; |
| 725 | if (drop_count == 3) { |
| 726 | work_to_do--; |
| 727 | (*work_done)++; |
| 728 | drop_count = 0; |
| 729 | } |
| 730 | if (cleaned_count == EDMA_RX_BUFFER_WRITE) { |
| 731 | /* If buffer clean count reaches 16, we replenish HW buffers. */ |
| 732 | ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id); |
| 733 | edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id), |
| 734 | sw_next_to_clean); |
| 735 | cleaned_count = ret_count; |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 736 | erdr->pending_fill = ret_count; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 737 | } |
| 738 | continue; |
| 739 | } |
| 740 | } |
| 741 | |
| 742 | work_to_do--; |
| 743 | (*work_done)++; |
| 744 | |
| 745 | /* Increment SW index */ |
| 746 | sw_next_to_clean = (sw_next_to_clean + 1) & |
| 747 | (erdr->count - 1); |
| 748 | |
| 749 | /* Get the packet size from the RRD */ |
| 750 | length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK; |
| 751 | |
| 752 | if (edma_cinfo->page_mode) { |
| 753 | /* paged skb */ |
| 754 | sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, |
| 755 | sw_next_to_clean, |
| 756 | erdr, edma_cinfo); |
| 757 | if (!pskb_may_pull(skb, ETH_HLEN)) { |
| 758 | cleaned_count += num_rfds; |
| 759 | dev_kfree_skb_any(skb); |
| 760 | continue; |
| 761 | } |
| 762 | } else { |
| 763 | /* single or fraglist skb */ |
| 764 | |
| 765 | /* A 16-byte reservation is required, as the first |
| 766 | * 16 bytes of the packet hold the RRD descriptor, so the |
| 767 | * actual data starts at an offset of 16. |
| 768 | */ |
| 769 | skb_reserve(skb, 16); |
| 770 | if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) |
| 771 | skb_put(skb, length); |
| 772 | else |
| 773 | sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, |
| 774 | sw_next_to_clean, |
| 775 | erdr, edma_cinfo); |
| 776 | } |
| 777 | |
| 778 | cleaned_count += num_rfds; |
| 779 | |
| 780 | if (edma_stp_rstp) |
| 781 | edma_rx_complete_stp_rstp(skb, port_id, rd); |
| 782 | |
| 783 | skb->protocol = eth_type_trans(skb, netdev); |
| 784 | |
| 785 | /* Record Rx queue for RFS/RPS and fill flow hash from HW */ |
| 786 | skb_record_rx_queue(skb, queue_to_rxid[queue_id]); |
| 787 | if (netdev->features & NETIF_F_RXHASH) { |
| 788 | hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT); |
| 789 | if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END)) |
| 790 | skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4); |
| 791 | } |
| 792 | |
| 793 | #ifdef CONFIG_NF_FLOW_COOKIE |
| 794 | skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK; |
| 795 | #endif |
| 796 | edma_receive_checksum(rd, skb); |
| 797 | |
| 798 | /* Process VLAN HW acceleration indication provided by HW */ |
| 799 | if (adapter->default_vlan_tag != rd->rrd4) { |
| 800 | vlan = rd->rrd4; |
| 801 | if (likely(rd->rrd7 & EDMA_RRD_CVLAN)) |
| 802 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); |
| 803 | else if (rd->rrd1 & EDMA_RRD_SVLAN) |
| 804 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan); |
| 805 | } |
| 806 | |
| 807 | /* Update rx statistics */ |
| 808 | adapter->stats.rx_packets++; |
| 809 | adapter->stats.rx_bytes += length; |
| 810 | |
| 811 | /* Check if we reached refill threshold */ |
| 812 | if (cleaned_count == EDMA_RX_BUFFER_WRITE) { |
| 813 | ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id); |
| 814 | edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id), |
| 815 | sw_next_to_clean); |
| 816 | cleaned_count = ret_count; |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 817 | erdr->pending_fill = ret_count; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 818 | } |
| 819 | |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 820 | /* |
| 821 | * We increment per-precedence counters for the rx packets |
| 822 | */ |
| 823 | if (edma_per_prec_stats_enable) { |
Manish Verma | 924d3ed | 2020-01-07 12:01:36 +0530 | [diff] [blame^] | 824 | atomic64_inc(&edma_cinfo->edma_ethstats.rx_prec[priority]); |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 825 | edma_cinfo->edma_ethstats.rx_ac[edma_dscp2ac_tbl[priority]]++; |
Rakesh Nair | 1c6a18c | 2017-08-02 21:27:06 +0530 | [diff] [blame] | 826 | |
| 827 | if (edma_iad_stats_enable) { |
| 828 | if (edma_dscp2ac_tbl[priority] == EDMA_AC_VI) |
| 829 | edma_iad_process_flow(edma_cinfo, skb, EDMA_INGRESS_DIR, priority); |
| 830 | } |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 831 | } |
| 832 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 833 | /* At this point skb should go to stack */ |
| 834 | napi_gro_receive(napi, skb); |
| 835 | } |
| 836 | |
| 837 | /* Check if we still have NAPI budget */ |
| 838 | if (!work_to_do) |
| 839 | break; |
| 840 | |
| 841 | /* Read index once again since we still have NAPI budget */ |
| 842 | edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data); |
| 843 | hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) & |
| 844 | EDMA_RFD_CONS_IDX_MASK; |
| 845 | } while (hw_next_to_clean != sw_next_to_clean); |
| 846 | |
| 847 | erdr->sw_next_to_clean = sw_next_to_clean; |
| 848 | |
| 849 | /* Refill here in case refill threshold wasn't reached */ |
| 850 | if (likely(cleaned_count)) { |
| 851 | ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id); |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 852 | erdr->pending_fill = ret_count; |
| 853 | if (ret_count) { |
| 854 | if (net_ratelimit()) |
| 855 | dev_dbg(&pdev->dev, "Edma not getting memory for descriptors.\n"); |
| 856 | } |
| 857 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 858 | edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id), |
| 859 | erdr->sw_next_to_clean); |
| 860 | } |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 861 | |
| 862 | return erdr->pending_fill; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 863 | } |
| 864 | |
| 865 | /* edma_delete_rfs_filter() |
| 866 | * Remove RFS filter from switch |
| 867 | */ |
| 868 | static int edma_delete_rfs_filter(struct edma_adapter *adapter, |
| 869 | struct edma_rfs_filter_node *filter_node) |
| 870 | { |
| 871 | int res = -1; |
| 872 | |
| 873 | if (likely(adapter->set_rfs_rule)) |
| 874 | res = (*adapter->set_rfs_rule)(adapter->netdev, |
| 875 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) |
| 876 | filter_node->keys.src, |
| 877 | filter_node->keys.dst, filter_node->keys.port16[0], |
| 878 | filter_node->keys.port16[1], |
| 879 | filter_node->keys.ip_proto, |
| 880 | #else |
| 881 | filter_node->keys.addrs.v4addrs.src, |
| 882 | filter_node->keys.addrs.v4addrs.dst, filter_node->keys.ports.src, |
| 883 | filter_node->keys.ports.dst, |
| 884 | filter_node->keys.basic.ip_proto, |
| 885 | #endif |
| 886 | filter_node->rq_id, |
| 887 | 0); |
| 888 | |
| 889 | return res; |
| 890 | } |
| 891 | |
| 892 | /* edma_add_rfs_filter() |
| 893 | * Add RFS filter to switch |
| 894 | */ |
| 895 | static int edma_add_rfs_filter(struct edma_adapter *adapter, |
| 896 | struct flow_keys *keys, u16 rq, |
| 897 | struct edma_rfs_filter_node *filter_node) |
| 898 | { |
| 899 | int res = -1; |
| 900 | |
| 901 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) |
| 902 | filter_node->keys.src = keys->src; |
| 903 | filter_node->keys.dst = keys->dst; |
| 904 | filter_node->keys.ports = keys->ports; |
| 905 | filter_node->keys.ip_proto = keys->ip_proto; |
| 906 | #else |
| 907 | filter_node->keys.addrs.v4addrs.src = keys->addrs.v4addrs.src; |
| 908 | filter_node->keys.addrs.v4addrs.dst = keys->addrs.v4addrs.dst; |
| 909 | filter_node->keys.ports.ports = keys->ports.ports; |
| 910 | filter_node->keys.basic.ip_proto = keys->basic.ip_proto; |
| 911 | #endif |
| 912 | |
| 913 | /* Call callback registered by ESS driver */ |
| 914 | if (likely(adapter->set_rfs_rule)) |
| 915 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) |
| 916 | res = (*adapter->set_rfs_rule)(adapter->netdev, keys->src, |
| 917 | keys->dst, keys->port16[0], keys->port16[1], |
| 918 | keys->ip_proto, rq, 1); |
| 919 | #else |
| 920 | res = (*adapter->set_rfs_rule)(adapter->netdev, keys->addrs.v4addrs.src, |
| 921 | keys->addrs.v4addrs.dst, keys->ports.src, keys->ports.dst, |
| 922 | keys->basic.ip_proto, rq, 1); |
| 923 | #endif |
| 924 | |
| 925 | return res; |
| 926 | } |
| 927 | |
| 928 | /* edma_rfs_key_search() |
| 929 | * Look for existing RFS entry |
| 930 | */ |
| 931 | static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h, |
| 932 | struct flow_keys *key) |
| 933 | { |
| 934 | struct edma_rfs_filter_node *p; |
| 935 | |
| 936 | hlist_for_each_entry(p, h, node) |
| 937 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) |
| 938 | if (p->keys.src == key->src && |
| 939 | p->keys.dst == key->dst && |
| 940 | p->keys.ports == key->ports && |
| 941 | p->keys.ip_proto == key->ip_proto) |
| 942 | #else |
| 943 | if (p->keys.addrs.v4addrs.src == key->addrs.v4addrs.src && |
| 944 | p->keys.addrs.v4addrs.dst == key->addrs.v4addrs.dst && |
| 945 | p->keys.ports.ports == key->ports.ports && |
| 946 | p->keys.basic.ip_proto == key->basic.ip_proto) |
| 947 | #endif |
| 948 | return p; |
| 949 | return NULL; |
| 950 | } |
| 951 | |
| 952 | /* edma_initialise_rfs_flow_table() |
| 953 | * Initialise EDMA RFS flow table |
| 954 | */ |
| 955 | static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter) |
| 956 | { |
| 957 | int i; |
| 958 | |
| 959 | spin_lock_init(&adapter->rfs.rfs_ftab_lock); |
| 960 | |
| 961 | /* Initialize EDMA flow hash table */ |
| 962 | for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) |
| 963 | INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]); |
| 964 | |
| 965 | adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES; |
| 966 | adapter->rfs.filter_available = adapter->rfs.max_num_filter; |
| 967 | adapter->rfs.hashtoclean = 0; |
| 968 | |
| 969 | /* Add timer to get periodic RFS updates from OS */ |
| 970 | init_timer(&adapter->rfs.expire_rfs); |
| 971 | adapter->rfs.expire_rfs.function = edma_flow_may_expire; |
| 972 | adapter->rfs.expire_rfs.data = (unsigned long)adapter; |
| 973 | mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ/4); |
| 974 | } |
| 975 | |
| 976 | /* edma_free_rfs_flow_table() |
| 977 | * Free EDMA RFS flow table |
| 978 | */ |
| 979 | static void edma_free_rfs_flow_table(struct edma_adapter *adapter) |
| 980 | { |
| 981 | int i; |
| 982 | |
| 983 | /* Remove sync timer */ |
| 984 | del_timer_sync(&adapter->rfs.expire_rfs); |
| 985 | spin_lock_bh(&adapter->rfs.rfs_ftab_lock); |
| 986 | |
| 987 | /* Free EDMA RFS table entries */ |
| 988 | adapter->rfs.filter_available = 0; |
| 989 | |
| 990 | /* Clean-up EDMA flow hash table */ |
| 991 | for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) { |
| 992 | struct hlist_head *hhead; |
| 993 | struct hlist_node *tmp; |
| 994 | struct edma_rfs_filter_node *filter_node; |
| 995 | int res; |
| 996 | |
| 997 | hhead = &adapter->rfs.hlist_head[i]; |
| 998 | hlist_for_each_entry_safe(filter_node, tmp, hhead, node) { |
| 999 | res = edma_delete_rfs_filter(adapter, filter_node); |
| 1000 | if (res < 0) |
| 1001 | dev_warn(&adapter->netdev->dev, |
| 1002 | "EDMA going down but RFS entry %d not allowed to be flushed by Switch", |
| 1003 | filter_node->flow_id); |
| 1004 | hlist_del(&filter_node->node); |
| 1005 | kfree(filter_node); |
| 1006 | } |
| 1007 | } |
| 1008 | spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); |
| 1009 | } |
| 1010 | |
| 1011 | /* edma_tx_unmap_and_free() |
| 1012 | * Unmap and free a Tx buffer |
| 1013 | */ |
| 1014 | static inline void edma_tx_unmap_and_free(struct platform_device *pdev, |
| 1015 | struct edma_sw_desc *sw_desc) |
| 1016 | { |
| 1017 | struct sk_buff *skb = sw_desc->skb; |
| 1018 | |
| 1019 | if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) || |
| 1020 | (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST))) |
| 1021 | /* unmap_single for skb head area */ |
| 1022 | dma_unmap_single(&pdev->dev, sw_desc->dma, |
| 1023 | sw_desc->length, DMA_TO_DEVICE); |
| 1024 | else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG) |
| 1025 | /* unmap page for paged fragments */ |
| 1026 | dma_unmap_page(&pdev->dev, sw_desc->dma, |
| 1027 | sw_desc->length, DMA_TO_DEVICE); |
| 1028 | |
| 1029 | if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST)) |
| 1030 | dev_kfree_skb_any(skb); |
| 1031 | |
| 1032 | sw_desc->flags = 0; |
| 1033 | } |
| 1034 | |
| 1035 | /* edma_tx_complete() |
| 1036 | * Clean the Tx queue and update the TPD software consumer index register |
| 1037 | */ |
| 1038 | static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id) |
| 1039 | { |
| 1040 | struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; |
| 1041 | struct edma_sw_desc *sw_desc; |
| 1042 | struct platform_device *pdev = edma_cinfo->pdev; |
| 1043 | int i; |
| 1044 | |
| 1045 | u16 sw_next_to_clean = etdr->sw_next_to_clean; |
| 1046 | u16 hw_next_to_clean; |
| 1047 | u32 data = 0; |
| 1048 | |
| 1049 | edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data); |
| 1050 | hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK; |
| 1051 | |
| 1052 | /* clean the buffer here */ |
| 1053 | while (sw_next_to_clean != hw_next_to_clean) { |
| 1054 | sw_desc = &etdr->sw_desc[sw_next_to_clean]; |
| 1055 | edma_tx_unmap_and_free(pdev, sw_desc); |
| 1056 | sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1); |
| 1057 | } |
| 1058 | |
| 1059 | etdr->sw_next_to_clean = sw_next_to_clean; |
| 1060 | |
| 1061 | /* update the TPD consumer index register */ |
| 1062 | edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean); |
| 1063 | |
| 1064 | /* Wake the queue if queue is stopped and netdev link is up */ |
| 1065 | for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) { |
| 1066 | if (netif_tx_queue_stopped(etdr->nq[i])) { |
| 1067 | if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i])) |
| 1068 | netif_tx_wake_queue(etdr->nq[i]); |
| 1069 | } |
| 1070 | } |
| 1071 | } |
| 1072 | |
| 1073 | /* edma_get_tx_buffer() |
| 1074 | * Get sw_desc corresponding to the TPD |
| 1075 | */ |
| 1076 | static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo, |
| 1077 | struct edma_tx_desc *tpd, int queue_id) |
| 1078 | { |
| 1079 | struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; |
| 1080 | |
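| | /* The TPD's index within the HW ring selects the matching software descriptor */ |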
| 1081 | return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc]; |
| 1082 | } |
| 1083 | |
| 1084 | /* edma_get_next_tpd() |
| 1085 | * Return a TPD descriptor for transfer |
| 1086 | */ |
| 1087 | static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo, |
| 1088 | int queue_id) |
| 1089 | { |
| 1090 | struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; |
| 1091 | u16 sw_next_to_fill = etdr->sw_next_to_fill; |
| 1092 | struct edma_tx_desc *tpd_desc = |
| 1093 | (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]); |
| 1094 | |
| 1095 | etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1); |
| 1096 | |
| 1097 | return tpd_desc; |
| 1098 | } |
| 1099 | |
| 1100 | /* edma_tpd_available() |
| 1101 | * Check number of free TPDs |
| 1102 | */ |
| 1103 | static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo, |
| 1104 | int queue_id) |
| 1105 | { |
| 1106 | struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; |
| 1107 | |
| 1108 | u16 sw_next_to_fill; |
| 1109 | u16 sw_next_to_clean; |
| 1110 | u16 count = 0; |
| 1111 | |
| 1112 | sw_next_to_clean = etdr->sw_next_to_clean; |
| 1113 | sw_next_to_fill = etdr->sw_next_to_fill; |
| 1114 | |
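| | /* Free TPDs = (clean index - fill index - 1) modulo ring size */ |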
| 1115 | if (likely(sw_next_to_clean <= sw_next_to_fill)) |
| 1116 | count = etdr->count; |
| 1117 | |
| 1118 | return count + sw_next_to_clean - sw_next_to_fill - 1; |
| 1119 | } |
| 1120 | |
| 1121 | /* edma_tx_queue_get() |
| 1122 | * Get the Tx queue index for this skb |
| 1123 | */ |
| 1124 | static inline int edma_tx_queue_get(struct edma_adapter *adapter, |
| 1125 | struct sk_buff *skb, int txq_id) |
| 1126 | { |
| 1127 | /* skb->priority is used as an index to skb priority table |
| 1128 | * and based on packet priority, the corresponding queue is assigned. |
| 1129 | */ |
| 1130 | return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb); |
| 1131 | } |
| 1132 | |
| 1133 | /* edma_tx_update_hw_idx() |
| 1134 | * Update the producer index for the Tx ring |
| 1135 | */ |
| 1136 | static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo, |
| 1137 | struct sk_buff *skb, int queue_id) |
| 1138 | { |
| 1139 | struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; |
| 1140 | u32 tpd_idx_data; |
| 1141 | |
| 1142 | /* Read and update the producer index */ |
| 1143 | edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data); |
| 1144 | tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS; |
| 1145 | tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK) |
| 1146 | << EDMA_TPD_PROD_IDX_SHIFT; |
| 1147 | |
| 1148 | edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data); |
| 1149 | } |
| 1150 | |
| 1151 | /* edma_rollback_tx() |
| 1152 | * Function to reclaim Tx resources in case of error |
| 1153 | */ |
| 1154 | static void edma_rollback_tx(struct edma_adapter *adapter, |
| 1155 | struct edma_tx_desc *start_tpd, int queue_id) |
| 1156 | { |
| 1157 | struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id]; |
| 1158 | struct edma_sw_desc *sw_desc; |
| 1159 | struct edma_tx_desc *tpd = NULL; |
| 1160 | u16 start_index, index; |
| 1161 | |
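| | /* Convert the first TPD pointer back into its ring index */ |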
| 1162 | start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc); |
| 1163 | |
| 1164 | index = start_index; |
| 1165 | while (index != etdr->sw_next_to_fill) { |
| 1166 | tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]); |
| 1167 | sw_desc = &etdr->sw_desc[index]; |
| 1168 | edma_tx_unmap_and_free(adapter->pdev, sw_desc); |
| 1169 | memset(tpd, 0, sizeof(struct edma_tx_desc)); |
| 1170 | if (++index == etdr->count) |
| 1171 | index = 0; |
| 1172 | } |
| 1173 | etdr->sw_next_to_fill = start_index; |
| 1174 | } |
| 1175 | |
Rakesh Nair | 7e05353 | 2017-08-18 17:53:25 +0530 | [diff] [blame] | 1176 | /* edma_get_v4_precedence() |
| 1177 | * Function to retrieve precedence for IPv4 |
| 1178 | */ |
| 1179 | static inline int edma_get_v4_precedence(struct sk_buff *skb, int nh_offset, u8 *precedence) |
| 1180 | { |
| 1181 | const struct iphdr *iph; |
| 1182 | struct iphdr iph_hdr; |
| 1183 | |
| 1184 | iph = skb_header_pointer(skb, nh_offset, sizeof(iph_hdr), &iph_hdr); |
| 1185 | |
| 1186 | if (!iph || iph->ihl < 5) |
| 1187 | return -1; |
| 1188 | |
| 1189 | *precedence = iph->tos >> EDMA_DSCP_PREC_SHIFT; |
| 1190 | |
| 1191 | return 0; |
| 1192 | } |
| 1193 | |
| 1194 | /* edma_get_v6_precedence() |
| 1195 | * Function to retrieve precedence for IPv6 |
| 1196 | */ |
| 1197 | static inline int edma_get_v6_precedence(struct sk_buff *skb, int nh_offset, u8 *precedence) |
| 1198 | { |
| 1199 | const struct ipv6hdr *iph; |
| 1200 | struct ipv6hdr iph_hdr; |
| 1201 | |
| 1202 | iph = skb_header_pointer(skb, nh_offset, sizeof(iph_hdr), &iph_hdr); |
| 1203 | |
| 1204 | if (!iph) |
| 1205 | return -1; |
| 1206 | |
| 1207 | *precedence = iph->priority >> EDMA_DSCP6_PREC_SHIFT; |
| 1208 | |
| 1209 | return 0; |
| 1210 | } |
| 1211 | |
| 1212 | /* edma_get_skb_precedence() |
| 1213 | * Function to retrieve precedence from skb |
| 1214 | */ |
| 1215 | static int edma_get_skb_precedence(struct sk_buff *skb, u8 *precedence) |
| 1216 | { |
| 1217 | int nhoff = skb_network_offset(skb); |
| 1218 | __be16 proto = skb->protocol; |
| 1219 | int ret; |
| 1220 | struct pppoeh_proto *pppoeh, ppp_hdr; |
| 1221 | |
| 1222 | switch(proto) { |
| 1223 | case __constant_htons(ETH_P_IP): { |
| 1224 | ret = edma_get_v4_precedence(skb, nhoff, precedence); |
| 1225 | if (ret) |
| 1226 | return -1; |
| 1227 | break; |
| 1228 | } |
| 1229 | case __constant_htons(ETH_P_IPV6): { |
| 1230 | ret = edma_get_v6_precedence(skb, nhoff, precedence); |
| 1231 | if (ret) |
| 1232 | return -1; |
| 1233 | break; |
| 1234 | } |
| 1235 | case __constant_htons(ETH_P_PPP_SES): { |
| 1236 | pppoeh = skb_header_pointer(skb, nhoff, sizeof(ppp_hdr), &ppp_hdr); |
| 1237 | if (!pppoeh) |
| 1238 | return -1; |
| 1239 | |
| 1240 | proto = pppoeh->proto; |
| 1241 | nhoff += PPPOE_SES_HLEN; |
| 1242 | switch (proto) { |
| 1243 | case __constant_htons(PPP_IP): { |
| 1244 | ret = edma_get_v4_precedence(skb, nhoff, precedence); |
| 1245 | if (ret) |
| 1246 | return -1; |
| 1247 | break; |
| 1248 | } |
| 1249 | case __constant_htons(PPP_IPV6): { |
| 1250 | ret = edma_get_v6_precedence(skb, nhoff, precedence); |
| 1251 | if (ret) |
| 1252 | return -1; |
| 1253 | break; |
| 1254 | } |
| 1255 | default: |
| 1256 | return -1; |
| 1257 | } |
| 1258 | break; |
| 1259 | } |
| 1260 | default: |
| 1261 | return -1; |
| 1262 | } |
| 1263 | |
| 1264 | return 0; |
| 1265 | } |
| 1266 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 1267 | /* edma_tx_map_and_fill() |
| 1268 | * gets called from edma_xmit_frame |
| 1269 | * |
| 1270 | * This is where the buffer to be transmitted |
| 1271 | * gets DMA-mapped |
| 1272 | */ |
| 1273 | static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo, |
| 1274 | struct edma_adapter *adapter, |
| 1275 | struct sk_buff *skb, int queue_id, |
| 1276 | unsigned int flags_transmit, |
| 1277 | u16 from_cpu, u16 dp_bitmap, |
| 1278 | bool packet_is_rstp, int nr_frags) |
| 1279 | { |
| 1280 | struct edma_sw_desc *sw_desc = NULL; |
| 1281 | struct platform_device *pdev = edma_cinfo->pdev; |
| 1282 | struct edma_tx_desc *tpd = NULL; |
| 1283 | struct edma_tx_desc *start_tpd = NULL; |
| 1284 | struct sk_buff *iter_skb; |
| 1285 | int i; |
| 1286 | u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0; |
| 1287 | u16 buf_len, lso_desc_len = 0; |
| 1288 | |
| 1289 | if (skb_is_gso(skb)) { |
| 1290 | /* TODO: What additional checks need to be performed here */ |
| 1291 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { |
| 1292 | lso_word1 |= EDMA_TPD_IPV4_EN; |
| 1293 | ip_hdr(skb)->check = 0; |
| 1294 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, |
| 1295 | ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); |
| 1296 | } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { |
| 1297 | lso_word1 |= EDMA_TPD_LSO_V2_EN; |
| 1298 | ipv6_hdr(skb)->payload_len = 0; |
| 1299 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
| 1300 | &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); |
| 1301 | } else |
| 1302 | return -EINVAL; |
| 1303 | |
| 1304 | lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) | |
| 1305 | (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT); |
| 1306 | } else if (flags_transmit & EDMA_HW_CHECKSUM) { |
| 1307 | u8 css, cso; |
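| | /* cso: offset where checksumming starts; css: offset where the result is inserted; both are passed to hardware as 16-bit word offsets */ |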
| 1308 | cso = skb_checksum_start_offset(skb); |
| 1309 | css = cso + skb->csum_offset; |
| 1310 | |
| 1311 | word1 |= (EDMA_TPD_CUSTOM_CSUM_EN); |
| 1312 | word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT; |
| 1313 | word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT); |
| 1314 | } |
| 1315 | |
| 1316 | if (skb->protocol == htons(ETH_P_PPP_SES)) |
| 1317 | word1 |= EDMA_TPD_PPPOE_EN; |
| 1318 | |
| 1319 | if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) { |
| 1320 | switch (skb->vlan_proto) { |
| 1321 | case htons(ETH_P_8021Q): |
| 1322 | word3 |= (1 << EDMA_TX_INS_CVLAN); |
| 1323 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) |
| 1324 | word3 |= vlan_tx_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT; |
| 1325 | #else |
| 1326 | word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT; |
| 1327 | #endif |
| 1328 | break; |
| 1329 | case htons(ETH_P_8021AD): |
| 1330 | word1 |= (1 << EDMA_TX_INS_SVLAN); |
| 1331 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) |
| 1332 | svlan_tag = vlan_tx_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT; |
| 1333 | #else |
| 1334 | svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT; |
| 1335 | #endif |
| 1336 | break; |
| 1337 | default: |
| 1338 | dev_err(&pdev->dev, "no ctag or stag present\n"); |
| 1339 | goto vlan_tag_error; |
| 1340 | } |
| 1341 | } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) { |
| 1342 | word3 |= (1 << EDMA_TX_INS_CVLAN); |
| 1343 | word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT; |
| 1344 | } |
| 1345 | |
| 1346 | if (packet_is_rstp) { |
| 1347 | word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT; |
| 1348 | word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT; |
| 1349 | } else { |
| 1350 | word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT; |
| 1351 | } |
| 1352 | |
| 1353 | buf_len = skb_headlen(skb); |
| 1354 | |
| 1355 | if (lso_word1) { |
| 1356 | if (lso_word1 & EDMA_TPD_LSO_V2_EN) { |
| 1357 | |
| 1358 | /* IPv6 LSOv2 descriptor */ |
| 1359 | start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id); |
| 1360 | sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); |
| 1361 | sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE; |
| 1362 | |
| 1363 | /* LSOv2 descriptor overrides addr field to pass length */ |
| 1364 | tpd->addr = cpu_to_le16(skb->len); |
| 1365 | tpd->svlan_tag = svlan_tag; |
| 1366 | tpd->word1 = word1 | lso_word1; |
| 1367 | tpd->word3 = word3; |
| 1368 | } |
| 1369 | |
| 1370 | tpd = edma_get_next_tpd(edma_cinfo, queue_id); |
| 1371 | if (!start_tpd) |
| 1372 | start_tpd = tpd; |
| 1373 | sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); |
| 1374 | |
| 1375 | /* The last buffer info contains the skb address, |
| 1376 | * so skb will be freed after unmap |
| 1377 | */ |
| 1378 | sw_desc->length = lso_desc_len; |
| 1379 | sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; |
| 1380 | |
| 1381 | sw_desc->dma = dma_map_single(&adapter->pdev->dev, |
| 1382 | skb->data, buf_len, DMA_TO_DEVICE); |
| 1383 | if (dma_mapping_error(&pdev->dev, sw_desc->dma)) |
| 1384 | goto dma_error; |
| 1385 | |
| 1386 | tpd->addr = cpu_to_le32(sw_desc->dma); |
| 1387 | tpd->len = cpu_to_le16(buf_len); |
| 1388 | |
| 1389 | tpd->svlan_tag = svlan_tag; |
| 1390 | tpd->word1 = word1 | lso_word1; |
| 1391 | tpd->word3 = word3; |
| 1392 | |
| 1393 | /* The last buffer info contains the skb address, |
| 1394 | * so it will be freed after unmap |
| 1395 | */ |
| 1396 | sw_desc->length = lso_desc_len; |
| 1397 | sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; |
| 1398 | |
| 1399 | buf_len = 0; |
| 1400 | } |
| 1401 | |
| 1402 | if (likely(buf_len)) { |
| 1403 | |
| 1404 | /* TODO Do not dequeue descriptor if there is a potential error */ |
| 1405 | tpd = edma_get_next_tpd(edma_cinfo, queue_id); |
| 1406 | |
| 1407 | if (!start_tpd) |
| 1408 | start_tpd = tpd; |
| 1409 | |
| 1410 | sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); |
| 1411 | |
| 1412 | /* The last buffer info contains the skb address, |
| 1413 | * so it will be freed after unmap |
| 1414 | */ |
| 1415 | sw_desc->length = buf_len; |
| 1416 | sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; |
| 1417 | sw_desc->dma = dma_map_single(&adapter->pdev->dev, |
| 1418 | skb->data, buf_len, DMA_TO_DEVICE); |
| 1419 | if (dma_mapping_error(&pdev->dev, sw_desc->dma)) |
| 1420 | goto dma_error; |
| 1421 | |
| 1422 | tpd->addr = cpu_to_le32(sw_desc->dma); |
| 1423 | tpd->len = cpu_to_le16(buf_len); |
| 1424 | |
| 1425 | tpd->svlan_tag = svlan_tag; |
| 1426 | tpd->word1 = word1 | lso_word1; |
| 1427 | tpd->word3 = word3; |
| 1428 | } |
| 1429 | |
| 1430 | i = 0; |
| 1431 | |
| 1432 | /* Walk through paged frags for head skb */ |
| 1433 | while (nr_frags--) { |
| 1434 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 1435 | buf_len = skb_frag_size(frag); |
| 1436 | tpd = edma_get_next_tpd(edma_cinfo, queue_id); |
| 1437 | sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); |
| 1438 | sw_desc->length = buf_len; |
| 1439 | sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG; |
| 1440 | |
| 1441 | sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE); |
| 1442 | |
| 1443 | if (dma_mapping_error(&pdev->dev, sw_desc->dma)) |
| 1444 | goto dma_error; |
| 1445 | |
| 1446 | tpd->addr = cpu_to_le32(sw_desc->dma); |
| 1447 | tpd->len = cpu_to_le16(buf_len); |
| 1448 | |
| 1449 | tpd->svlan_tag = svlan_tag; |
| 1450 | tpd->word1 = word1 | lso_word1; |
| 1451 | tpd->word3 = word3; |
| 1452 | i++; |
| 1453 | } |
| 1454 | |
| 1455 | /* Walk through all fraglist skbs */ |
| 1456 | skb_walk_frags(skb, iter_skb) { |
| 1457 | buf_len = iter_skb->len; |
| 1458 | tpd = edma_get_next_tpd(edma_cinfo, queue_id); |
| 1459 | sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); |
| 1460 | sw_desc->length = buf_len; |
| 1461 | sw_desc->dma = dma_map_single(&adapter->pdev->dev, |
| 1462 | iter_skb->data, buf_len, DMA_TO_DEVICE); |
| 1463 | |
| 1464 | if (dma_mapping_error(&pdev->dev, sw_desc->dma)) |
| 1465 | goto dma_error; |
| 1466 | |
| 1467 | tpd->addr = cpu_to_le32(sw_desc->dma); |
| 1468 | tpd->len = cpu_to_le16(buf_len); |
| 1469 | tpd->svlan_tag = svlan_tag; |
| 1470 | tpd->word1 = word1 | lso_word1; |
| 1471 | tpd->word3 = word3; |
| 1472 | sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST; |
| 1473 | |
| 1474 | i = 0; |
| 1475 | |
| 1476 | nr_frags = skb_shinfo(iter_skb)->nr_frags; |
| 1477 | |
| 1478 | /* Walk through paged frags for this fraglist skb */ |
| 1479 | while (nr_frags--) { |
| 1480 | skb_frag_t *frag = &skb_shinfo(iter_skb)->frags[i]; |
| 1481 | buf_len = skb_frag_size(frag); |
| 1482 | tpd = edma_get_next_tpd(edma_cinfo, queue_id); |
| 1483 | sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); |
| 1484 | sw_desc->length = buf_len; |
| 1485 | sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG; |
| 1486 | |
| 1487 | sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, |
| 1488 | 0, buf_len, DMA_TO_DEVICE); |
| 1489 | if (dma_mapping_error(&pdev->dev, sw_desc->dma)) |
| 1490 | goto dma_error; |
| 1491 | |
| 1492 | tpd->addr = cpu_to_le32(sw_desc->dma); |
| 1493 | tpd->len = cpu_to_le16(buf_len); |
| 1494 | tpd->svlan_tag = svlan_tag; |
| 1495 | tpd->word1 = word1 | lso_word1; |
| 1496 | tpd->word3 = word3; |
| 1497 | i++; |
| 1498 | } |
| 1499 | } |
| 1500 | |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 1501 | /* If sysctl support for per-precedence stats is enabled */ |
| 1502 | if (edma_per_prec_stats_enable) { |
Rakesh Nair | 7e05353 | 2017-08-18 17:53:25 +0530 | [diff] [blame] | 1503 | uint8_t precedence = 0; |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 1504 | |
Rakesh Nair | 7e05353 | 2017-08-18 17:53:25 +0530 | [diff] [blame] | 1505 | if (!edma_get_skb_precedence(skb, &precedence)) { |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 1506 | /* Increment per-precedence counters for tx packets |
| 1507 | * and set the precedence in the TPD. |
| 1508 | */ |
Manish Verma | 924d3ed | 2020-01-07 12:01:36 +0530 | [diff] [blame^] | 1509 | atomic64_inc(&edma_cinfo->edma_ethstats.tx_prec[precedence]); |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 1510 | edma_cinfo->edma_ethstats.tx_ac[edma_dscp2ac_tbl[precedence]]++; |
Rakesh Nair | dadf1fb | 2017-09-07 11:58:28 +0530 | [diff] [blame] | 1511 | if (tpd) |
| 1512 | tpd->word3 |= precedence << EDMA_TPD_PRIO_SHIFT; |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 1513 | } |
Rakesh Nair | 1c6a18c | 2017-08-02 21:27:06 +0530 | [diff] [blame] | 1514 | |
| 1515 | /* If sysctl support for IAD stats is enabled */ |
| 1516 | if (edma_iad_stats_enable) { |
| 1517 | if (edma_dscp2ac_tbl[precedence] == EDMA_AC_VI) |
| 1518 | edma_iad_process_flow(edma_cinfo, skb, EDMA_EGRESS_DIR, precedence); |
| 1519 | } |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 1520 | } |
| 1521 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 1522 | /* If tpd or sw_desc is still uninitialized, then we need to return */ |
| 1523 | if ((!tpd) || (!sw_desc)) |
| 1524 | return -EINVAL; |
| 1525 | |
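| | /* Mark the last descriptor as end-of-packet and attach the skb to its sw descriptor so it is freed on Tx completion. */ |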
| 1526 | tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT; |
| 1527 | |
| 1528 | sw_desc->skb = skb; |
| 1529 | sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST; |
| 1530 | |
| 1531 | return 0; |
| 1532 | |
| 1533 | dma_error: |
| 1534 | edma_rollback_tx(adapter, start_tpd, queue_id); |
| 1535 | dev_err(&pdev->dev, "TX DMA map failed\n"); |
| 1536 | vlan_tag_error: |
| 1537 | return -ENOMEM; |
| 1538 | } |
| 1539 | |
| 1540 | /* edma_check_link() |
| 1541 | * check Link status |
| 1542 | */ |
| 1543 | static int edma_check_link(struct edma_adapter *adapter) |
| 1544 | { |
| 1545 | struct phy_device *phydev = adapter->phydev; |
| 1546 | |
| 1547 | if (!(adapter->poll_required)) |
| 1548 | return __EDMA_LINKUP; |
| 1549 | |
| 1550 | if (phydev->link) |
| 1551 | return __EDMA_LINKUP; |
| 1552 | |
| 1553 | return __EDMA_LINKDOWN; |
| 1554 | } |
| 1555 | |
| 1556 | /* edma_adjust_link() |
| 1557 | * Check the EDMA link status and update carrier and Tx queue state accordingly |
| 1558 | */ |
| 1559 | void edma_adjust_link(struct net_device *netdev) |
| 1560 | { |
| 1561 | int status; |
| 1562 | struct edma_adapter *adapter = netdev_priv(netdev); |
| 1563 | struct phy_device *phydev = adapter->phydev; |
| 1564 | |
| 1565 | if (!test_bit(__EDMA_UP, &adapter->state_flags)) |
| 1566 | return; |
| 1567 | |
| 1568 | status = edma_check_link(adapter); |
| 1569 | |
| 1570 | if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) { |
| 1571 | dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed); |
| 1572 | adapter->link_state = __EDMA_LINKUP; |
| 1573 | netif_carrier_on(netdev); |
| 1574 | if (netif_running(netdev)) |
| 1575 | netif_tx_wake_all_queues(netdev); |
| 1576 | } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) { |
| 1577 | dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name); |
| 1578 | adapter->link_state = __EDMA_LINKDOWN; |
| 1579 | netif_carrier_off(netdev); |
| 1580 | netif_tx_stop_all_queues(netdev); |
| 1581 | } |
| 1582 | } |
| 1583 | |
Bhaskar Valaboju | e429bab | 2017-03-15 09:01:23 +0530 | [diff] [blame] | 1584 | /* edma_get_stats64() |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 1585 | * Statistics API used to retrieve the Tx/Rx statistics |
| 1586 | */ |
Bhaskar Valaboju | e429bab | 2017-03-15 09:01:23 +0530 | [diff] [blame] | 1587 | struct rtnl_link_stats64 *edma_get_stats64(struct net_device *netdev, |
| 1588 | struct rtnl_link_stats64 *stats) |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 1589 | { |
| 1590 | struct edma_adapter *adapter = netdev_priv(netdev); |
| 1591 | |
Bhaskar Valaboju | e429bab | 2017-03-15 09:01:23 +0530 | [diff] [blame] | 1592 | memcpy(stats, &adapter->stats, sizeof(*stats)); |
| 1593 | |
| 1594 | return stats; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 1595 | } |
| 1596 | |
| 1597 | /* edma_xmit() |
| 1598 | * Main api to be called by the core for packet transmission |
| 1599 | */ |
| 1600 | netdev_tx_t edma_xmit(struct sk_buff *skb, |
| 1601 | struct net_device *net_dev) |
| 1602 | { |
| 1603 | struct edma_adapter *adapter = netdev_priv(net_dev); |
| 1604 | struct edma_common_info *edma_cinfo = adapter->edma_cinfo; |
| 1605 | struct edma_tx_desc_ring *etdr; |
| 1606 | u16 from_cpu = 0, dp_bitmap = 0, txq_id; |
| 1607 | int ret, nr_frags_first = 0, num_tpds_needed = 1, queue_id = 0; |
| 1608 | unsigned int flags_transmit = 0; |
| 1609 | bool packet_is_rstp = false; |
| 1610 | struct netdev_queue *nq = NULL; |
| 1611 | |
| 1612 | if (skb_shinfo(skb)->nr_frags) { |
| 1613 | nr_frags_first = skb_shinfo(skb)->nr_frags; |
| 1614 | |
| 1615 | /* It is unlikely the below check hits; BUG_ON if it does */ |
| 1616 | BUG_ON(nr_frags_first > MAX_SKB_FRAGS); |
| 1617 | |
| 1618 | num_tpds_needed += nr_frags_first; |
| 1619 | } |
| 1620 | |
| 1621 | if (skb_has_frag_list(skb)) { |
| 1622 | struct sk_buff *iter_skb; |
| 1623 | |
| 1624 | /* Walk through fraglist skbs making a note of nr_frags */ |
| 1625 | skb_walk_frags(skb, iter_skb) { |
| 1626 | unsigned char nr_frags = skb_shinfo(iter_skb)->nr_frags; |
| 1627 | |
| 1628 | /* It is unlikely the below check hits; BUG_ON if it does */ |
| 1629 | BUG_ON(nr_frags > MAX_SKB_FRAGS); |
| 1630 | |
| 1631 | /* One TPD for skb->data and more for nr_frags */ |
| 1632 | num_tpds_needed += (1 + nr_frags); |
| 1633 | } |
| 1634 | } |
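| | /* At this point num_tpds_needed covers the head skb data, each of its |
| | * page frags, and (1 + nr_frags) descriptors for every fraglist skb. |
| | */ |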
| 1635 | |
| 1636 | if (edma_stp_rstp) { |
| 1637 | u16 ath_hdr, ath_eth_type; |
| 1638 | u8 mac_addr[EDMA_ETH_HDR_LEN]; |
| 1639 | ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]); |
| 1640 | if (ath_eth_type == edma_ath_eth_type) { |
| 1641 | packet_is_rstp = true; |
| 1642 | ath_hdr = htons(*(uint16_t *)&skb->data[14]); |
| 1643 | dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK; |
| 1644 | from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT; |
| 1645 | memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN); |
| 1646 | |
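| | /* Strip the 4-byte Atheros header: the Ethernet header saved above is copied back over the pulled bytes below. */ |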
| 1647 | skb_pull(skb, 4); |
| 1648 | |
| 1649 | memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN); |
| 1650 | } |
| 1651 | } |
| 1652 | |
| 1653 | /* This will be one of the 4 Tx queues exposed to the Linux kernel */ |
| 1654 | txq_id = skb_get_queue_mapping(skb); |
| 1655 | queue_id = edma_tx_queue_get(adapter, skb, txq_id); |
| 1656 | etdr = edma_cinfo->tpd_ring[queue_id]; |
| 1657 | nq = netdev_get_tx_queue(net_dev, txq_id); |
| 1658 | |
| 1659 | local_bh_disable(); |
| 1660 | /* Tx is not handled in bottom-half context. Hence, we need to protect |
| 1661 | * Tx from other tasks and bottom halves |
| 1662 | */ |
| 1663 | |
| 1664 | if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) { |
Rakesh Nair | d4a1150 | 2017-11-07 17:02:11 +0530 | [diff] [blame] | 1665 | if (edma_disable_queue_stop) { |
| 1666 | local_bh_enable(); |
| 1667 | dev_dbg(&net_dev->dev, "Packet dropped as queue is full"); |
| 1668 | dev_kfree_skb_any(skb); |
| 1669 | adapter->stats.tx_errors++; |
| 1670 | return NETDEV_TX_OK; |
| 1671 | } |
| 1672 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 1673 | /* Not enough descriptors, just stop the queue */ |
| 1674 | netif_tx_stop_queue(nq); |
| 1675 | local_bh_enable(); |
| 1676 | dev_dbg(&net_dev->dev, "Not enough descriptors available"); |
| 1677 | edma_cinfo->edma_ethstats.tx_desc_error++; |
| 1678 | return NETDEV_TX_BUSY; |
| 1679 | } |
| 1680 | |
| 1681 | /* Check and mark VLAN tag offload */ |
| 1682 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) |
| 1683 | if (vlan_tx_tag_present(skb)) |
| 1684 | #else |
| 1685 | if (skb_vlan_tag_present(skb)) |
| 1686 | #endif |
| 1687 | flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG; |
| 1688 | else if (adapter->default_vlan_tag) |
| 1689 | flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG; |
| 1690 | |
| 1691 | /* Check and mark checksum offload */ |
| 1692 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) |
| 1693 | flags_transmit |= EDMA_HW_CHECKSUM; |
| 1694 | |
| 1695 | /* Map and fill descriptor for Tx */ |
| 1696 | ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id, |
| 1697 | flags_transmit, from_cpu, dp_bitmap, |
| 1698 | packet_is_rstp, nr_frags_first); |
| 1699 | if (ret) { |
| 1700 | dev_kfree_skb_any(skb); |
| 1701 | adapter->stats.tx_errors++; |
| 1702 | goto netdev_okay; |
| 1703 | } |
| 1704 | |
| 1705 | /* Update SW producer index */ |
| 1706 | edma_tx_update_hw_idx(edma_cinfo, skb, queue_id); |
| 1707 | |
| 1708 | /* update tx statistics */ |
| 1709 | adapter->stats.tx_packets++; |
| 1710 | adapter->stats.tx_bytes += skb->len; |
| 1711 | |
| 1712 | netdev_okay: |
| 1713 | local_bh_enable(); |
| 1714 | return NETDEV_TX_OK; |
| 1715 | } |
| 1716 | |
| 1717 | /* |
| 1718 | * edma_flow_may_expire() |
| 1719 | * Timer function called periodically to expire and delete RFS flow nodes |
| 1720 | */ |
| 1721 | void edma_flow_may_expire(unsigned long data) |
| 1722 | { |
| 1723 | struct edma_adapter *adapter = (struct edma_adapter *)data; |
| 1724 | int j; |
| 1725 | |
| 1726 | spin_lock_bh(&adapter->rfs.rfs_ftab_lock); |
| 1727 | for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) { |
| 1728 | struct hlist_head *hhead; |
| 1729 | struct hlist_node *tmp; |
| 1730 | struct edma_rfs_filter_node *n; |
| 1731 | bool res; |
| 1732 | |
| 1733 | hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++]; |
| 1734 | hlist_for_each_entry_safe(n, tmp, hhead, node) { |
| 1735 | res = rps_may_expire_flow(adapter->netdev, n->rq_id, |
| 1736 | n->flow_id, n->filter_id); |
| 1737 | if (res) { |
| 1738 | res = edma_delete_rfs_filter(adapter, n); |
| 1739 | if (res < 0) |
| 1740 | dev_dbg(&adapter->netdev->dev, |
| 1741 | "RFS entry %d not allowed to be flushed by Switch", |
| 1742 | n->flow_id); |
| 1743 | else { |
| 1744 | hlist_del(&n->node); |
| 1745 | kfree(n); |
| 1746 | adapter->rfs.filter_available++; |
| 1747 | } |
| 1748 | } |
| 1749 | } |
| 1750 | } |
| 1751 | |
| 1752 | adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1); |
| 1753 | spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); |
| 1754 | mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ/4); |
| 1755 | } |
| 1756 | |
| 1757 | /* edma_rx_flow_steer() |
| 1758 | * Called by the core to steer the flow to a CPU |
| 1759 | */ |
| 1760 | int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, |
| 1761 | u16 rxq, u32 flow_id) |
| 1762 | { |
| 1763 | struct flow_keys keys; |
| 1764 | struct edma_rfs_filter_node *filter_node; |
| 1765 | struct edma_adapter *adapter = netdev_priv(dev); |
| 1766 | u16 hash_tblid; |
| 1767 | int res; |
| 1768 | |
| 1769 | if (skb->protocol == htons(ETH_P_IPV6)) { |
| 1770 | res = -EPROTONOSUPPORT; |
| 1771 | goto no_protocol_err; |
| 1772 | } |
| 1773 | |
| 1774 | /* Dissect flow parameters |
| 1775 | * We only support IPv4 + TCP/UDP |
| 1776 | */ |
| 1777 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) |
| 1778 | res = skb_flow_dissect(skb, &keys); |
| 1779 | if (!((keys.ip_proto == IPPROTO_TCP) || (keys.ip_proto == IPPROTO_UDP))) { |
| 1780 | #else |
| 1781 | res = skb_flow_dissect_flow_keys(skb, &keys, 0); |
| 1782 | if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) { |
| 1783 | #endif |
| 1784 | res = -EPROTONOSUPPORT; |
| 1785 | goto no_protocol_err; |
| 1786 | } |
| 1787 | |
| 1788 | /* Check if table entry exists */ |
| 1789 | hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK; |
| 1790 | |
| 1791 | spin_lock_bh(&adapter->rfs.rfs_ftab_lock); |
| 1792 | filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys); |
| 1793 | |
| 1794 | if (filter_node) { |
| 1795 | if (rxq == filter_node->rq_id) { |
| 1796 | res = -EEXIST; |
| 1797 | goto out; |
| 1798 | } else { |
| 1799 | res = edma_delete_rfs_filter(adapter, filter_node); |
| 1800 | if (res < 0) |
| 1801 | dev_warn(&adapter->netdev->dev, |
| 1802 | "Cannot steer flow %d to different queue", |
| 1803 | filter_node->flow_id); |
| 1804 | else { |
| 1805 | adapter->rfs.filter_available++; |
| 1806 | res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node); |
| 1807 | if (res < 0) { |
| 1808 | dev_warn(&adapter->netdev->dev, |
| 1809 | "Cannot steer flow %d to different queue", |
| 1810 | filter_node->flow_id); |
| 1811 | } else { |
| 1812 | adapter->rfs.filter_available--; |
| 1813 | filter_node->rq_id = rxq; |
| 1814 | filter_node->filter_id = res; |
| 1815 | } |
| 1816 | } |
| 1817 | } |
| 1818 | } else { |
| 1819 | if (adapter->rfs.filter_available == 0) { |
| 1820 | res = -EBUSY; |
| 1821 | goto out; |
| 1822 | } |
| 1823 | |
| 1824 | filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC); |
| 1825 | if (!filter_node) { |
| 1826 | res = -ENOMEM; |
| 1827 | goto out; |
| 1828 | } |
| 1829 | |
| 1830 | res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node); |
| 1831 | if (res < 0) { |
| 1832 | kfree(filter_node); |
| 1833 | goto out; |
| 1834 | } |
| 1835 | |
| 1836 | adapter->rfs.filter_available--; |
| 1837 | filter_node->rq_id = rxq; |
| 1838 | filter_node->filter_id = res; |
| 1839 | filter_node->flow_id = flow_id; |
| 1840 | filter_node->keys = keys; |
| 1841 | INIT_HLIST_NODE(&filter_node->node); |
| 1842 | hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]); |
| 1843 | } |
| 1844 | |
| 1845 | out: |
| 1846 | spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); |
| 1847 | no_protocol_err: |
| 1848 | return res; |
| 1849 | } |
| 1850 | |
| 1851 | #ifdef CONFIG_RFS_ACCEL |
| 1852 | /* edma_register_rfs_filter() |
| 1853 | * Add RFS filter callback |
| 1854 | */ |
| 1855 | int edma_register_rfs_filter(struct net_device *netdev, |
| 1856 | set_rfs_filter_callback_t set_filter) |
| 1857 | { |
| 1858 | struct edma_adapter *adapter = netdev_priv(netdev); |
| 1859 | |
| 1860 | spin_lock_bh(&adapter->rfs.rfs_ftab_lock); |
| 1861 | |
| 1862 | if (adapter->set_rfs_rule) { |
| 1863 | spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); |
| 1864 | return -1; |
| 1865 | } |
| 1866 | |
| 1867 | adapter->set_rfs_rule = set_filter; |
| 1868 | spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); |
| 1869 | |
| 1870 | return 0; |
| 1871 | } |
| 1872 | #endif |
| 1873 | |
| 1874 | /* edma_select_xps_queue() |
| 1875 | * Called by the Linux Tx stack to select the Tx queue |
| 1876 | */ |
| 1877 | u16 edma_select_xps_queue(struct net_device *dev, struct sk_buff *skb, |
| 1878 | void *accel_priv, select_queue_fallback_t fallback) |
| 1879 | { |
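| | /* Pick the Tx queue matching the CPU that is processing this packet. */ |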
| 1880 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) |
| 1881 | return smp_processor_id(); |
| 1882 | #else |
| 1883 | int cpu = get_cpu(); |
| 1884 | put_cpu(); |
| 1885 | |
| 1886 | return cpu; |
| 1887 | #endif |
| 1888 | } |
| 1889 | |
| 1890 | /* edma_alloc_tx_rings() |
| 1891 | * Allocate Tx rings |
| 1892 | */ |
| 1893 | int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo) |
| 1894 | { |
| 1895 | struct platform_device *pdev = edma_cinfo->pdev; |
| 1896 | int i, err = 0; |
| 1897 | |
| 1898 | for (i = 0; i < edma_cinfo->num_tx_queues; i++) { |
| 1899 | err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]); |
| 1900 | if (err) { |
| 1901 | dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i); |
| 1902 | return err; |
| 1903 | } |
| 1904 | } |
| 1905 | |
| 1906 | return 0; |
| 1907 | } |
| 1908 | |
| 1909 | /* edma_free_tx_rings() |
| 1910 | * Free tx rings |
| 1911 | */ |
| 1912 | void edma_free_tx_rings(struct edma_common_info *edma_cinfo) |
| 1913 | { |
| 1914 | int i; |
| 1915 | |
| 1916 | for (i = 0; i < edma_cinfo->num_tx_queues; i++) |
| 1917 | edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]); |
| 1918 | } |
| 1919 | |
| 1920 | /* edma_free_tx_resources() |
| 1921 | * Free buffers associated with tx rings |
| 1922 | */ |
| 1923 | void edma_free_tx_resources(struct edma_common_info *edma_cinfo) |
| 1924 | { |
| 1925 | struct edma_tx_desc_ring *etdr; |
| 1926 | struct edma_sw_desc *sw_desc; |
| 1927 | struct platform_device *pdev = edma_cinfo->pdev; |
| 1928 | int i, j; |
| 1929 | |
| 1930 | for (i = 0; i < edma_cinfo->num_tx_queues; i++) { |
| 1931 | etdr = edma_cinfo->tpd_ring[i]; |
Rakesh Nair | 3a75688 | 2017-11-15 12:18:21 +0530 | [diff] [blame] | 1932 | for (j = 0; j < edma_cinfo->tx_ring_count; j++) { |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 1933 | sw_desc = &etdr->sw_desc[j]; |
| 1934 | if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD | |
| 1935 | EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST)) |
| 1936 | edma_tx_unmap_and_free(pdev, sw_desc); |
| 1937 | } |
| 1938 | } |
| 1939 | } |
| 1940 | |
| 1941 | /* edma_alloc_rx_rings() |
| 1942 | * Allocate rx rings |
| 1943 | */ |
| 1944 | int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo) |
| 1945 | { |
| 1946 | struct platform_device *pdev = edma_cinfo->pdev; |
| 1947 | int i, j, err = 0; |
| 1948 | |
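| | /* When only 4 Rx queues are used, the ring index advances by 2 so the queues land on alternate hardware rings; otherwise rings are used consecutively. */ |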
| 1949 | for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { |
| 1950 | err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]); |
| 1951 | if (err) { |
| 1952 | dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i); |
| 1953 | return err; |
| 1954 | } |
| 1955 | j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); |
| 1956 | } |
| 1957 | |
| 1958 | return 0; |
| 1959 | } |
| 1960 | |
| 1961 | /* edma_free_rx_rings() |
| 1962 | * free rx rings |
| 1963 | */ |
| 1964 | void edma_free_rx_rings(struct edma_common_info *edma_cinfo) |
| 1965 | { |
| 1966 | int i, j; |
| 1967 | |
| 1968 | for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { |
| 1969 | edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]); |
| 1970 | j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); |
| 1971 | } |
| 1972 | } |
| 1973 | |
| 1974 | /* edma_free_queues() |
| 1975 | * Free the allocated queues |
| 1976 | */ |
| 1977 | void edma_free_queues(struct edma_common_info *edma_cinfo) |
| 1978 | { |
| 1979 | int i, j; |
| 1980 | |
| 1981 | for (i = 0; i < edma_cinfo->num_tx_queues; i++) { |
| 1982 | if (edma_cinfo->tpd_ring[i]) |
| 1983 | kfree(edma_cinfo->tpd_ring[i]); |
| 1984 | edma_cinfo->tpd_ring[i] = NULL; |
| 1985 | } |
| 1986 | |
| 1987 | for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { |
| 1988 | if (edma_cinfo->rfd_ring[j]) |
| 1989 | kfree(edma_cinfo->rfd_ring[j]); |
| 1990 | edma_cinfo->rfd_ring[j] = NULL; |
| 1991 | j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); |
| 1992 | } |
| 1993 | |
| 1994 | edma_cinfo->num_rx_queues = 0; |
| 1995 | edma_cinfo->num_tx_queues = 0; |
| 1996 | |
| 1997 | return; |
| 1998 | } |
| 1999 | |
| 2000 | /* edma_free_rx_resources() |
| 2001 | * Free buffers associated with rx rings |
| 2002 | */ |
| 2003 | void edma_free_rx_resources(struct edma_common_info *edma_cinfo) |
| 2004 | { |
| 2005 | struct edma_rfd_desc_ring *erdr; |
| 2006 | struct platform_device *pdev = edma_cinfo->pdev; |
| 2007 | int i, j, k; |
| 2008 | |
| 2009 | for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) { |
| 2010 | erdr = edma_cinfo->rfd_ring[k]; |
Rakesh Nair | 3a75688 | 2017-11-15 12:18:21 +0530 | [diff] [blame] | 2011 | for (j = 0; j < edma_cinfo->rx_ring_count; j++) { |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2012 | /* unmap all descriptors while cleaning */ |
| 2013 | edma_clean_rfd(pdev, erdr, j, 1); |
| 2014 | } |
| 2015 | k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); |
| 2016 | |
| 2017 | } |
| 2018 | } |
| 2019 | |
| 2020 | /* edma_alloc_queues_tx() |
| 2021 | * Allocate memory for all Tx rings |
| 2022 | */ |
| 2023 | int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo) |
| 2024 | { |
| 2025 | int i; |
| 2026 | |
| 2027 | for (i = 0; i < edma_cinfo->num_tx_queues; i++) { |
| 2028 | struct edma_tx_desc_ring *etdr; |
| 2029 | etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL); |
| 2030 | if (!etdr) |
| 2031 | goto err; |
| 2032 | etdr->count = edma_cinfo->tx_ring_count; |
| 2033 | edma_cinfo->tpd_ring[i] = etdr; |
| 2034 | } |
| 2035 | |
| 2036 | return 0; |
| 2037 | err: |
| 2038 | edma_free_queues(edma_cinfo); |
| 2039 | return -1; |
| 2040 | } |
| 2041 | |
| 2042 | /* edma_alloc_queues_rx() |
| 2043 | * Allocate memory for all Rx rings |
| 2044 | */ |
| 2045 | int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo) |
| 2046 | { |
| 2047 | int i, j; |
| 2048 | |
| 2049 | for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { |
| 2050 | struct edma_rfd_desc_ring *rfd_ring; |
| 2051 | rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring), |
| 2052 | GFP_KERNEL); |
| 2053 | if (!rfd_ring) |
| 2054 | goto err; |
| 2055 | rfd_ring->count = edma_cinfo->rx_ring_count; |
| 2056 | edma_cinfo->rfd_ring[j] = rfd_ring; |
| 2057 | j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); |
| 2058 | } |
| 2059 | return 0; |
| 2060 | err: |
| 2061 | edma_free_queues(edma_cinfo); |
| 2062 | return -1; |
| 2063 | } |
| 2064 | |
| 2065 | /* edma_clear_irq_status() |
| 2066 | * Clear interrupt status |
| 2067 | */ |
| 2068 | void edma_clear_irq_status(void) |
| 2069 | { |
| 2070 | edma_write_reg(EDMA_REG_RX_ISR, 0xff); |
| 2071 | edma_write_reg(EDMA_REG_TX_ISR, 0xffff); |
| 2072 | edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff); |
| 2073 | edma_write_reg(EDMA_REG_WOL_ISR, 0x1); |
| 2074 | } |
| 2075 | |
| 2076 | /* edma_configure() |
| 2077 | * Configure skb, edma interrupts and control register. |
| 2078 | */ |
| 2079 | int edma_configure(struct edma_common_info *edma_cinfo) |
| 2080 | { |
| 2081 | struct edma_hw *hw = &edma_cinfo->hw; |
| 2082 | u32 intr_modrt_data; |
| 2083 | u32 intr_ctrl_data = 0; |
| 2084 | int i, j, ret_count; |
| 2085 | |
| 2086 | edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data); |
| 2087 | intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT); |
| 2088 | intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT; |
| 2089 | edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data); |
| 2090 | |
| 2091 | edma_clear_irq_status(); |
| 2092 | |
| 2093 | /* Clear any WOL status */ |
| 2094 | edma_write_reg(EDMA_REG_WOL_CTRL, 0); |
| 2095 | intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT); |
| 2096 | intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT); |
| 2097 | edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data); |
| 2098 | edma_configure_tx(edma_cinfo); |
| 2099 | edma_configure_rx(edma_cinfo); |
| 2100 | |
| 2101 | /* Allocate the RX buffer */ |
| 2102 | for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { |
| 2103 | struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j]; |
| 2104 | ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j); |
| 2105 | if (ret_count) |
| 2106 | dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n"); |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 2107 | ring->pending_fill = ret_count; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2108 | j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); |
| 2109 | } |
| 2110 | |
| 2111 | /* Configure descriptor Ring */ |
| 2112 | edma_init_desc(edma_cinfo); |
| 2113 | return 0; |
| 2114 | } |
| 2115 | |
| 2116 | /* edma_irq_enable() |
| 2117 | * Enable default interrupt generation settings |
| 2118 | */ |
| 2119 | void edma_irq_enable(struct edma_common_info *edma_cinfo) |
| 2120 | { |
| 2121 | struct edma_hw *hw = &edma_cinfo->hw; |
| 2122 | int i, j; |
| 2123 | |
| 2124 | edma_write_reg(EDMA_REG_RX_ISR, 0xff); |
| 2125 | for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { |
| 2126 | edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask); |
| 2127 | j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); |
| 2128 | } |
| 2129 | edma_write_reg(EDMA_REG_TX_ISR, 0xffff); |
| 2130 | for (i = 0; i < edma_cinfo->num_tx_queues; i++) |
| 2131 | edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask); |
| 2132 | } |
| 2133 | |
| 2134 | /* edma_irq_disable() |
| 2135 | * Disable Interrupt |
| 2136 | */ |
| 2137 | void edma_irq_disable(struct edma_common_info *edma_cinfo) |
| 2138 | { |
| 2139 | int i; |
| 2140 | |
| 2141 | for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) |
| 2142 | edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0); |
| 2143 | |
| 2144 | for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) |
| 2145 | edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0); |
| 2146 | edma_write_reg(EDMA_REG_MISC_IMR, 0); |
| 2147 | edma_write_reg(EDMA_REG_WOL_IMR, 0); |
| 2148 | } |
| 2149 | |
| 2150 | /* edma_free_irqs() |
| 2151 | * Free All IRQs |
| 2152 | */ |
| 2153 | void edma_free_irqs(struct edma_adapter *adapter) |
| 2154 | { |
| 2155 | struct edma_common_info *edma_cinfo = adapter->edma_cinfo; |
| 2156 | int i, j; |
| 2157 | int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2); |
| 2158 | |
| 2159 | for (i = 0; i < CONFIG_NR_CPUS; i++) { |
Rakesh Nair | 8016fbd | 2018-01-03 15:46:06 +0530 | [diff] [blame] | 2160 | for (j = edma_cinfo->edma_percpu_info[i].tx_comp_start; j < (edma_cinfo->edma_percpu_info[i].tx_comp_start + 4); j++) |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2161 | free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]); |
| 2162 | |
| 2163 | for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++) |
| 2164 | free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]); |
| 2165 | } |
| 2166 | } |
| 2167 | |
| 2168 | /* edma_enable_rx_ctrl() |
| 2169 | * Enable RX queue control |
| 2170 | */ |
| 2171 | void edma_enable_rx_ctrl(struct edma_hw *hw) |
| 2172 | { |
| 2173 | u32 data; |
| 2174 | |
| 2175 | edma_read_reg(EDMA_REG_RXQ_CTRL, &data); |
| 2176 | data |= EDMA_RXQ_CTRL_EN; |
| 2177 | edma_write_reg(EDMA_REG_RXQ_CTRL, data); |
| 2178 | } |
| 2179 | |
| 2180 | |
| 2181 | /* edma_enable_tx_ctrl() |
| 2182 | * Enable TX queue control |
| 2183 | */ |
| 2184 | void edma_enable_tx_ctrl(struct edma_hw *hw) |
| 2185 | { |
| 2186 | u32 data; |
| 2187 | |
| 2188 | edma_read_reg(EDMA_REG_TXQ_CTRL, &data); |
| 2189 | data |= EDMA_TXQ_CTRL_TXQ_EN; |
| 2190 | edma_write_reg(EDMA_REG_TXQ_CTRL, data); |
| 2191 | } |
| 2192 | |
| 2193 | /* edma_stop_rx_tx() |
| 2194 | * Disable RX/TX queue control |
| 2195 | */ |
| 2196 | void edma_stop_rx_tx(struct edma_hw *hw) |
| 2197 | { |
| 2198 | u32 data; |
| 2199 | |
| 2200 | edma_read_reg(EDMA_REG_RXQ_CTRL, &data); |
| 2201 | data &= ~EDMA_RXQ_CTRL_EN; |
| 2202 | edma_write_reg(EDMA_REG_RXQ_CTRL, data); |
| 2203 | edma_read_reg(EDMA_REG_TXQ_CTRL, &data); |
| 2204 | data &= ~EDMA_TXQ_CTRL_TXQ_EN; |
| 2205 | edma_write_reg(EDMA_REG_TXQ_CTRL, data); |
| 2206 | } |
| 2207 | |
| 2208 | /* edma_reset() |
| 2209 | * Reset the EDMA |
| 2210 | */ |
| 2211 | int edma_reset(struct edma_common_info *edma_cinfo) |
| 2212 | { |
| 2213 | struct edma_hw *hw = &edma_cinfo->hw; |
| 2214 | |
| 2215 | edma_irq_disable(edma_cinfo); |
| 2216 | |
| 2217 | edma_clear_irq_status(); |
| 2218 | |
| 2219 | edma_stop_rx_tx(hw); |
| 2220 | |
| 2221 | return 0; |
| 2222 | } |
| 2223 | |
| 2224 | /* edma_fill_netdev() |
| 2225 | * Fill netdev for each etdr |
| 2226 | */ |
| 2227 | int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id, |
| 2228 | int dev, int txq_id) |
| 2229 | { |
| 2230 | struct edma_tx_desc_ring *etdr; |
| 2231 | int i = 0; |
| 2232 | |
| 2233 | etdr = edma_cinfo->tpd_ring[queue_id]; |
| 2234 | |
| 2235 | while (etdr->netdev[i]) |
| 2236 | i++; |
| 2237 | |
| 2238 | if (i >= EDMA_MAX_NETDEV_PER_QUEUE) |
| 2239 | return -1; |
| 2240 | |
| 2241 | /* Populate the netdev associated with the tpd ring */ |
| 2242 | etdr->netdev[i] = edma_netdev[dev]; |
| 2243 | etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id); |
| 2244 | |
| 2245 | return 0; |
| 2246 | } |
| 2247 | |
| 2248 | /* edma_change_mtu() |
| 2249 | * change the MTU of the NIC. |
| 2250 | */ |
| 2251 | int edma_change_mtu(struct net_device *netdev, int new_mtu) |
| 2252 | { |
| 2253 | struct edma_adapter *adapter = netdev_priv(netdev); |
| 2254 | struct edma_common_info *edma_cinfo = adapter->edma_cinfo; |
| 2255 | int old_mtu = netdev->mtu; |
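| | /* Worst-case frame size: new MTU plus Ethernet header, FCS and up to two VLAN tags. */ |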
| 2256 | int max_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + (2 * VLAN_HLEN); |
| 2257 | |
| 2258 | if ((max_frame_size < ETH_ZLEN + ETH_FCS_LEN) || |
| 2259 | (max_frame_size > EDMA_MAX_JUMBO_FRAME_SIZE)) { |
| 2260 | dev_err(&edma_cinfo->pdev->dev, "MTU setting not correct\n"); |
| 2261 | return -EINVAL; |
| 2262 | } |
| 2263 | |
| 2264 | /* set MTU */ |
| 2265 | if (old_mtu != new_mtu) { |
| 2266 | netdev->mtu = new_mtu; |
| 2267 | netdev_update_features(netdev); |
| 2268 | } |
| 2269 | |
| 2270 | return 0; |
| 2271 | } |
| 2272 | |
| 2273 | /* edma_set_mac() |
| 2274 | * Change the Ethernet Address of the NIC |
| 2275 | */ |
| 2276 | int edma_set_mac_addr(struct net_device *netdev, void *p) |
| 2277 | { |
| 2278 | struct sockaddr *addr = p; |
| 2279 | |
| 2280 | if (!is_valid_ether_addr(addr->sa_data)) |
| 2281 | return -EINVAL; |
| 2282 | |
| 2283 | if (netif_running(netdev)) |
| 2284 | return -EBUSY; |
| 2285 | |
| 2286 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
| 2287 | return 0; |
| 2288 | } |
| 2289 | |
| 2290 | /* edma_set_stp_rstp() |
| 2291 | * set stp/rstp |
| 2292 | */ |
| 2293 | void edma_set_stp_rstp(bool rstp) |
| 2294 | { |
| 2295 | edma_stp_rstp = rstp; |
| 2296 | } |
| 2297 | |
Sourav Poddar | 44858a7 | 2019-12-09 22:51:30 +0530 | [diff] [blame] | 2298 | /* edma_set_jumbo_multi_segment() |
| 2299 | * Enable/disable jumbo multi-segment support |
| 2300 | */ |
| 2301 | void edma_set_jumbo_multi_segment(bool jumbo_multi_segment) |
| 2302 | { |
| 2303 | edma_jumbo_multi_segment = jumbo_multi_segment; |
| 2304 | } |
| 2305 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2306 | /* edma_assign_ath_hdr_type() |
| 2307 | * assign atheros header eth type |
| 2308 | */ |
| 2309 | void edma_assign_ath_hdr_type(int eth_type) |
| 2310 | { |
| 2311 | edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK; |
| 2312 | } |
| 2313 | |
| 2314 | /* edma_get_default_vlan_tag() |
| 2315 | * Used by other modules to get the default vlan tag |
| 2316 | */ |
| 2317 | int edma_get_default_vlan_tag(struct net_device *netdev) |
| 2318 | { |
| 2319 | struct edma_adapter *adapter = netdev_priv(netdev); |
| 2320 | |
| 2321 | if (adapter->default_vlan_tag) |
| 2322 | return adapter->default_vlan_tag; |
| 2323 | |
| 2324 | return 0; |
| 2325 | } |
| 2326 | |
| 2327 | /* edma_open() |
| 2328 | * Gets called when the netdevice is brought up; starts the queues. |
| 2329 | */ |
| 2330 | int edma_open(struct net_device *netdev) |
| 2331 | { |
| 2332 | struct edma_adapter *adapter = netdev_priv(netdev); |
| 2333 | struct platform_device *pdev = adapter->edma_cinfo->pdev; |
Rakesh Nair | afaf9ab | 2018-03-01 21:17:01 +0530 | [diff] [blame] | 2334 | struct edma_common_info *edma_cinfo = adapter->edma_cinfo; |
| 2335 | |
| 2336 | edma_initialise_rfs_flow_table(adapter); |
| 2337 | |
| 2338 | /* We enable IRQs only in the first edma_open call. |
| 2339 | * We do this to make sure we do not receive |
| 2340 | * packets before affinities are set in the |
| 2341 | * qca-edma script |
| 2342 | */ |
| 2343 | if (!edma_cinfo->is_first_open_done) { |
| 2344 | /* Enable all 16 tx and 8 rx irq mask */ |
| 2345 | edma_irq_enable(edma_cinfo); |
| 2346 | edma_cinfo->is_first_open_done = true; |
| 2347 | } |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2348 | |
| 2349 | netif_tx_start_all_queues(netdev); |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2350 | set_bit(__EDMA_UP, &adapter->state_flags); |
| 2351 | |
| 2352 | /* If link polling is enabled (in our case, for WAN), then |
| 2353 | * do a phy start; else always set the link as UP |
| 2354 | */ |
Rakesh Nair | ed29f6b | 2017-04-04 15:48:08 +0530 | [diff] [blame] | 2355 | mutex_lock(&adapter->poll_mutex); |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2356 | if (adapter->poll_required) { |
| 2357 | if (!IS_ERR(adapter->phydev)) { |
| 2358 | phy_start(adapter->phydev); |
| 2359 | phy_start_aneg(adapter->phydev); |
| 2360 | adapter->link_state = __EDMA_LINKDOWN; |
| 2361 | } else { |
| 2362 | dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n"); |
| 2363 | } |
| 2364 | } else { |
| 2365 | adapter->link_state = __EDMA_LINKUP; |
| 2366 | netif_carrier_on(netdev); |
| 2367 | } |
Rakesh Nair | ed29f6b | 2017-04-04 15:48:08 +0530 | [diff] [blame] | 2368 | mutex_unlock(&adapter->poll_mutex); |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2369 | |
| 2370 | return 0; |
| 2371 | } |
| 2372 | |
| 2373 | |
| 2374 | /* edma_close() |
| 2375 | * Gets called when the netdevice is brought down; stops the queues. |
| 2376 | */ |
| 2377 | int edma_close(struct net_device *netdev) |
| 2378 | { |
| 2379 | struct edma_adapter *adapter = netdev_priv(netdev); |
| 2380 | |
| 2381 | edma_free_rfs_flow_table(adapter); |
| 2382 | netif_carrier_off(netdev); |
| 2383 | netif_tx_stop_all_queues(netdev); |
| 2384 | |
Rakesh Nair | ed29f6b | 2017-04-04 15:48:08 +0530 | [diff] [blame] | 2385 | mutex_lock(&adapter->poll_mutex); |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2386 | if (adapter->poll_required) { |
| 2387 | if (!IS_ERR(adapter->phydev)) |
| 2388 | phy_stop(adapter->phydev); |
| 2389 | } |
Rakesh Nair | ed29f6b | 2017-04-04 15:48:08 +0530 | [diff] [blame] | 2390 | mutex_unlock(&adapter->poll_mutex); |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2391 | |
| 2392 | adapter->link_state = __EDMA_LINKDOWN; |
| 2393 | |
| 2394 | /* Mark the GMAC as no longer up, after the link state has been set down |
| 2395 | */ |
| 2396 | clear_bit(__EDMA_UP, &adapter->state_flags); |
| 2397 | |
| 2398 | return 0; |
| 2399 | } |
| 2400 | |
| 2401 | /* edma_poll |
| 2402 | * polling function that gets called when the napi gets scheduled. |
| 2403 | * |
| 2404 | * The main sequence of tasks performed by this API is: |
| 2405 | * clear irq status -> Tx completion -> Rx completion -> |
| 2406 | * re-enable interrupts. |
| 2407 | */ |
| 2408 | int edma_poll(struct napi_struct *napi, int budget) |
| 2409 | { |
| 2410 | struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi, |
| 2411 | struct edma_per_cpu_queues_info, napi); |
| 2412 | struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo; |
| 2413 | u32 reg_data; |
| 2414 | u32 shadow_rx_status, shadow_tx_status; |
| 2415 | int queue_id; |
| 2416 | int i, work_done = 0; |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 2417 | u16 rx_pending_fill; |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2418 | |
| 2419 | /* Store the Rx/Tx status by ANDing it with |
| 2420 | * the appropriate per-CPU Rx/Tx mask |
| 2421 | */ |
| 2422 | edma_read_reg(EDMA_REG_RX_ISR, ®_data); |
| 2423 | edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask; |
| 2424 | shadow_rx_status = edma_percpu_info->rx_status; |
| 2425 | edma_read_reg(EDMA_REG_TX_ISR, ®_data); |
| 2426 | edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask; |
| 2427 | shadow_tx_status = edma_percpu_info->tx_status; |
| 2428 | |
| 2429 | /* Every core will have a start, which will be computed |
| 2430 | * in probe and stored in edma_percpu_info->tx_start variable. |
| 2431 | * We will shift the status bit by tx_start to obtain |
| 2432 | * status bits for the core on which the current processing |
| 2433 | * is happening. Since there are 4 tx queues per core, |
| 2434 | * we will run the loop till we get the correct queue to clear. |
| 2435 | */ |
| 2436 | while (edma_percpu_info->tx_status) { |
| 2437 | queue_id = ffs(edma_percpu_info->tx_status) - 1; |
| 2438 | edma_tx_complete(edma_cinfo, queue_id); |
| 2439 | edma_percpu_info->tx_status &= ~(1 << queue_id); |
| 2440 | } |
| 2441 | |
| 2442 | /* Every core will have a start, which will be computed |
| 2443 | * in probe and stored in edma_percpu_info->rx_start variable. |
| 2444 | * We will shift the status bit by rx_start to obtain |
| 2445 | * status bits for the core on which the current processing |
| 2446 | * is happening. Since there can be multiple rx queues per core, we |
| 2447 | * will run the loop till we get the correct queue to clear. |
| 2448 | */ |
| 2449 | while (edma_percpu_info->rx_status) { |
| 2450 | queue_id = ffs(edma_percpu_info->rx_status) - 1; |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 2451 | rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done, |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2452 | budget, queue_id, napi); |
| 2453 | |
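| | /* If some Rx buffers could not be refilled, report the full budget so NAPI keeps polling and the refill is retried. */ |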
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 2454 | if (likely(work_done < budget)) { |
| 2455 | if (rx_pending_fill) { |
| 2456 | work_done = budget; |
| 2457 | break; |
| 2458 | } |
| 2459 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2460 | edma_percpu_info->rx_status &= ~(1 << queue_id); |
Rakesh Nair | 03b586c | 2017-04-03 18:28:58 +0530 | [diff] [blame] | 2461 | } |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2462 | else |
| 2463 | break; |
| 2464 | } |
| 2465 | |
| 2466 | /* Clear the status register, to avoid the interrupts to |
| 2467 | * reoccur. This clearing of interrupt status register is |
| 2468 | * done here as writing to status register only takes place |
| 2469 | * once the producer/consumer index has been updated to |
| 2470 | * reflect that the packet transmission/reception went fine. |
| 2471 | */ |
| 2472 | edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status); |
| 2473 | edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status); |
| 2474 | |
| 2475 | /* If budget not fully consumed, exit the polling mode */ |
| 2476 | if (likely(work_done < budget)) { |
| 2477 | napi_complete(napi); |
| 2478 | |
| 2479 | /* re-enable the interrupts */ |
| 2480 | for (i = 0; i < edma_cinfo->num_rxq_per_core; i++) |
| 2481 | edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1); |
| 2482 | for (i = 0; i < edma_cinfo->num_txq_per_core; i++) |
Rakesh Nair | 8016fbd | 2018-01-03 15:46:06 +0530 | [diff] [blame] | 2483 | edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_comp_start + i), 0x1); |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2484 | } |
| 2485 | |
| 2486 | return work_done; |
| 2487 | } |
| 2488 | |
| 2489 | /* edma_interrupt() |
| 2490 | * interrupt handler |
| 2491 | */ |
| 2492 | irqreturn_t edma_interrupt(int irq, void *dev) |
| 2493 | { |
| 2494 | struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev; |
| 2495 | struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo; |
| 2496 | int i; |
| 2497 | |
| 2498 | /* Unmask the TX/RX interrupt register */ |
| 2499 | for (i = 0; i < edma_cinfo->num_rxq_per_core; i++) |
| 2500 | edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0); |
| 2501 | |
| 2502 | for (i = 0; i < edma_cinfo->num_txq_per_core; i++) |
Rakesh Nair | 8016fbd | 2018-01-03 15:46:06 +0530 | [diff] [blame] | 2503 | edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_comp_start + i), 0x0); |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 2504 | |
| 2505 | napi_schedule(&edma_percpu_info->napi); |
| 2506 | |
| 2507 | return IRQ_HANDLED; |
| 2508 | } |