Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 1 | /* |
Rakesh Nair | 8016fbd | 2018-01-03 15:46:06 +0530 | [diff] [blame] | 2 | * Copyright (c) 2014 - 2018, The Linux Foundation. All rights reserved. |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all copies. |
| 7 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
| 8 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
| 9 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
| 10 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
| 11 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
| 12 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT |
| 13 | * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
| 14 | */ |
| 15 | |
| 16 | #ifndef _EDMA_H_ |
| 17 | #define _EDMA_H_ |
| 18 | |
| 19 | #include <linux/init.h> |
| 20 | #include <linux/interrupt.h> |
| 21 | #include <linux/types.h> |
| 22 | #include <linux/errno.h> |
| 23 | #include <linux/module.h> |
| 24 | #include <linux/netdevice.h> |
| 25 | #include <linux/etherdevice.h> |
| 26 | #include <linux/skbuff.h> |
| 27 | #include <linux/io.h> |
| 28 | #include <linux/vmalloc.h> |
| 29 | #include <linux/pagemap.h> |
| 30 | #include <linux/smp.h> |
| 31 | #include <linux/platform_device.h> |
| 32 | #include <linux/of.h> |
| 33 | #include <linux/of_device.h> |
| 34 | #include <linux/kernel.h> |
| 35 | #include <linux/device.h> |
| 36 | #include <linux/sysctl.h> |
| 37 | #include <linux/phy.h> |
| 38 | #include <linux/of_net.h> |
| 39 | #include <net/checksum.h> |
| 40 | #include <net/ip6_checksum.h> |
| 41 | #include <asm-generic/bug.h> |
| 42 | #include <linux/version.h> |
Rakesh Nair | 7e05353 | 2017-08-18 17:53:25 +0530 | [diff] [blame] | 43 | #include <linux/ppp_defs.h> |
| 44 | #include <linux/if_pppox.h> |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 45 | #include "ess_edma.h" |
| 46 | |
| 47 | #define EDMA_CPU_CORES_SUPPORTED 4 |
| 48 | #define EDMA_MAX_PORTID_SUPPORTED 5 |
| 49 | #define EDMA_MAX_VLAN_SUPPORTED EDMA_MAX_PORTID_SUPPORTED |
| 50 | #define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1) |
| 51 | #define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */ |
| 52 | #define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 Netdev per queue, 1 space for indexing */ |
| 53 | |
| 54 | #define EDMA_MAX_RECEIVE_QUEUE 8 |
| 55 | #define EDMA_MAX_TRANSMIT_QUEUE 16 |
| 56 | |
| 57 | /* WAN/LAN adapter number */ |
| 58 | #define EDMA_WAN 0 |
| 59 | #define EDMA_LAN 1 |
| 60 | |
| 61 | /* VLAN tag */ |
| 62 | #define EDMA_LAN_DEFAULT_VLAN 1 |
| 63 | #define EDMA_WAN_DEFAULT_VLAN 2 |
| 64 | |
| 65 | #define EDMA_DEFAULT_GROUP1_VLAN 2 |
| 66 | #define EDMA_DEFAULT_GROUP2_VLAN 1 |
| 67 | #define EDMA_DEFAULT_GROUP3_VLAN 3 |
| 68 | #define EDMA_DEFAULT_GROUP4_VLAN 4 |
| 69 | #define EDMA_DEFAULT_GROUP5_VLAN 5 |
| 70 | |
| 71 | #define EDMA_DEFAULT_GROUP1_BMP 0x20 |
| 72 | #define EDMA_DEFAULT_GROUP2_BMP 0x1e |
| 73 | |
| 74 | #define EDMA_DEFAULT_DISABLE_RSS 0 |
Rakesh Nair | d4a1150 | 2017-11-07 17:02:11 +0530 | [diff] [blame] | 75 | #define EDMA_DEFAULT_DISABLE_QUEUE_STOP 0 |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 76 | #define EDMA_RSS_DISABLE 1 |
| 77 | #define EDMA_RSS_ENABLE 0 |
| 78 | |
| 79 | /* Queues exposed to linux kernel */ |
| 80 | #define EDMA_NETDEV_TX_QUEUE 4 |
| 81 | #define EDMA_NETDEV_RX_QUEUE 4 |
| 82 | |
| 83 | /* Number of queues per core */ |
| 84 | #define EDMA_NUM_TXQ_PER_CORE 4 |
| 85 | #define EDMA_NUM_RXQ_PER_CORE 2 |
| 86 | |
| 87 | #define EDMA_TPD_EOP_SHIFT 31 |
| 88 | |
| 89 | #define EDMA_PORT_ID_SHIFT 12 |
| 90 | #define EDMA_PORT_ID_MASK 0x7 |
| 91 | |
| 92 | /* tpd word 3 bit 18-28 */ |
| 93 | #define EDMA_TPD_PORT_BITMAP_SHIFT 18 |
| 94 | |
Rakesh Nair | 888af95 | 2017-06-30 18:41:58 +0530 | [diff] [blame] | 95 | /* tpd word 3 bit 29-31 */ |
| 96 | #define EDMA_TPD_PRIO_SHIFT 29 |
| 97 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 98 | #define EDMA_TPD_FROM_CPU_SHIFT 25 |
| 99 | |
| 100 | #define EDMA_FROM_CPU_MASK 0x80 |
| 101 | #define EDMA_SKB_PRIORITY_MASK 0x38 |
| 102 | |
/* TX/RX descriptor ring count */
/* should be a power of 2 */
#define EDMA_RX_RING_SIZE 512
#define EDMA_TX_RING_SIZE 512

/* Flags used in paged/non paged mode */
#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256
#define EDMA_RX_HEAD_BUFF_SIZE 1540

/* MAX frame size supported by switch */
#define EDMA_MAX_JUMBO_FRAME_SIZE 9216

/* Configurations: interrupt moderation timers (IMT), burst sizes and
 * FIFO thresholds programmed into the EDMA hardware.
 */
#define EDMA_INTR_CLEAR_TYPE 0
#define EDMA_INTR_SW_IDX_W_TYPE 0
#define EDMA_FIFO_THRESH_TYPE 0
#define EDMA_RSS_TYPE 0
#define EDMA_RX_IMT 0x0020
#define EDMA_TX_IMT 0x0050
#define EDMA_TPD_BURST 5
#define EDMA_TXF_BURST 0x100
#define EDMA_RFD_BURST 8
#define EDMA_RFD_THR 16
#define EDMA_RFD_LTHR 0

/* RX/TX per CPU based mask/shift */
#define EDMA_TX_PER_CPU_MASK 0xF
#define EDMA_RX_PER_CPU_MASK 0x3
#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2
#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1
#define EDMA_TX_CPU_START_SHIFT 0x2
#define EDMA_RX_CPU_START_SHIFT 0x1
| 135 | |
/* Flags used in transmit direction */
#define EDMA_HW_CHECKSUM 0x00000001
#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002
#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004

/* Software descriptor flags: what the sw_desc's skb/dma entry refers to */
#define EDMA_SW_DESC_FLAG_LAST 0x1
#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2
#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4
#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8
#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10
#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20


/* One extra slot for the skb head in addition to its page frags */
#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1)

/* Ethtool specific list of EDMA supported features */
#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \
				| SUPPORTED_10baseT_Full \
				| SUPPORTED_100baseT_Half \
				| SUPPORTED_100baseT_Full \
				| SUPPORTED_1000baseT_Full)

/* Receive side atheros Header */
#define EDMA_RX_ATH_HDR_VERSION 0x2
#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14
#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11
#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6
#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4

/* Transmit side atheros Header */
#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F
#define EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80
#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7

#define EDMA_ETH_HDR_LEN 12
#define EDMA_ETH_TYPE_MASK 0xFFFF

#define EDMA_RX_BUFFER_WRITE 16
#define EDMA_RFD_AVAIL_THR 80

/* Sentinel address meaning "no MDIO-managed PHY attached" */
#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR

/* Number of traffic precedence levels tracked in the stats arrays */
#define EDMA_PRECEDENCE_MAX 8

#define EDMA_AC_BK 0 /* Access Category: Background */
#define EDMA_AC_BE 1 /* Access Category: Best Effort */
#define EDMA_AC_VI 2 /* Access Category: Video */
#define EDMA_AC_VO 3 /* Access Category: Voice */
#define EDMA_AC_MAX 4

#define EDMA_DSCP2AC_INPUT_PARAMS_MAX 2

/* Flow direction selectors for inter-arrival-delay (IAD) stats */
#define EDMA_INGRESS_DIR 0
#define EDMA_EGRESS_DIR 1
#define EDMA_MAX_IAD_FLOW_STATS_SUPPORTED 8
| 191 | |
/*
 * ssdk_rfs_ipct_rule_set()
 *	Install/remove (per 'action') an RFS rule for a conntrack 5-tuple.
 *
 * NOTE(review): resolved by the SSDK switch driver, not this module —
 * exact loadbalance/action semantics should be confirmed against SSDK.
 */
extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst,
				  __be16 sport, __be16 dport,
				  uint8_t proto, u16 loadbalance, bool action);
Rakesh Nair | 03824d5 | 2017-07-31 17:10:49 +0530 | [diff] [blame] | 195 | |
/*
 * struct edma_ethtool_statistics
 *	Per-queue, per-precedence and per-flow counters exported via ethtool.
 *	Field names are part of the ethtool strings interface — do not rename.
 */
struct edma_ethtool_statistics {
	/* Per-TX-queue packet counters (16 TX queues) */
	u64 tx_q0_pkt;
	u64 tx_q1_pkt;
	u64 tx_q2_pkt;
	u64 tx_q3_pkt;
	u64 tx_q4_pkt;
	u64 tx_q5_pkt;
	u64 tx_q6_pkt;
	u64 tx_q7_pkt;
	u64 tx_q8_pkt;
	u64 tx_q9_pkt;
	u64 tx_q10_pkt;
	u64 tx_q11_pkt;
	u64 tx_q12_pkt;
	u64 tx_q13_pkt;
	u64 tx_q14_pkt;
	u64 tx_q15_pkt;
	/* Per-TX-queue byte counters */
	u64 tx_q0_byte;
	u64 tx_q1_byte;
	u64 tx_q2_byte;
	u64 tx_q3_byte;
	u64 tx_q4_byte;
	u64 tx_q5_byte;
	u64 tx_q6_byte;
	u64 tx_q7_byte;
	u64 tx_q8_byte;
	u64 tx_q9_byte;
	u64 tx_q10_byte;
	u64 tx_q11_byte;
	u64 tx_q12_byte;
	u64 tx_q13_byte;
	u64 tx_q14_byte;
	u64 tx_q15_byte;
	/* Per-RX-queue packet counters (8 RX queues) */
	u64 rx_q0_pkt;
	u64 rx_q1_pkt;
	u64 rx_q2_pkt;
	u64 rx_q3_pkt;
	u64 rx_q4_pkt;
	u64 rx_q5_pkt;
	u64 rx_q6_pkt;
	u64 rx_q7_pkt;
	/* Per-RX-queue byte counters */
	u64 rx_q0_byte;
	u64 rx_q1_byte;
	u64 rx_q2_byte;
	u64 rx_q3_byte;
	u64 rx_q4_byte;
	u64 rx_q5_byte;
	u64 rx_q6_byte;
	u64 rx_q7_byte;
	u64 tx_desc_error;	/* TX descriptor errors reported by hardware */
	u64 rx_alloc_fail_ctr;	/* RX buffer allocation failures */
	/* Per-precedence counters; atomic64_t as they are updated lock-free */
	atomic64_t tx_prec[EDMA_PRECEDENCE_MAX];
	atomic64_t rx_prec[EDMA_PRECEDENCE_MAX];
	/* Per-access-category counters */
	u64 rx_ac[EDMA_AC_MAX];
	u64 tx_ac[EDMA_AC_MAX];
	/* Per-flow inter-arrival-delay stats and delta start timestamps */
	u64 rx_flow_iad[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];
	u64 rx_flow_delta_start_ts[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];
	u64 tx_flow_iad[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];
	u64 tx_flow_delta_start_ts[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];
};
| 256 | |
/*
 * struct edma_video_delay_stats
 *	edma video delay statistics, tracked per flow/group in tick units.
 */
struct edma_video_delay_stats
{
	uint64_t start_ts;		/* tick value when first frame is received for a particular group */
	uint64_t max_interframe_delay;	/* maximum interframe delay */
	uint64_t max_delay;		/* ticks value in absolute terms when the max_interframe delay is observed */
	uint64_t last_frame_ts;		/* last frame tick value */
};
| 268 | |
/*
 * struct edma_flow_attrib
 *	per-flow table for dest-addr/dest-port info
 */
struct edma_flow_attrib {
	u16 dport;	/* destination L4 port */
	u16 sport;	/* source L4 port */
	u32 saddr;	/* source IP address */
	u32 daddr;	/* destination IP address */
	u8 ip_version;	/* IP version of the flow (v4/v6) */
};
| 280 | |
/* MDIO bus private data for PHY access */
struct edma_mdio_data {
	struct mii_bus *mii_bus;	/* registered MII bus */
	void __iomem *membase;		/* mapped MDIO register base */
	int phy_irq[PHY_MAX_ADDR];	/* per-PHY-address interrupt numbers */
};
| 286 | |
/* EDMA LINK state */
enum edma_link_state {
	__EDMA_LINKUP,		/* Indicate link is UP */
	__EDMA_LINKDOWN		/* Indicate link is down */
};
| 292 | |
/* EDMA GMAC state (bit numbers for edma_adapter.state_flags) */
enum edma_gmac_state {
	__EDMA_UP		/* use to indicate GMAC is up */
};
| 297 | |
/* edma transmit descriptor (TPD) — hardware layout, little-endian fields */
struct edma_tx_desc {
	__le16 len;		/* full packet including CRC */
	__le16 svlan_tag;	/* vlan tag */
	__le32 word1;		/* bytes 4-7 */
	__le32 addr;		/* DMA address of buffer */
	__le32 word3;		/* bytes 12-15 */
};
| 306 | |
/* edma receive return descriptor (RRD) — eight 16-bit hardware words */
struct edma_rx_return_desc {
	u16 rrd0;
	u16 rrd1;
	u16 rrd2;
	u16 rrd3;
	u16 rrd4;
	u16 rrd5;
	u16 rrd6;
	u16 rrd7;
};
| 318 | |
/* RFD (receive free) descriptor — hands an empty buffer to the hardware */
struct edma_rx_free_desc {
	__le32 buffer_addr;	/* DMA address of the receive buffer */
};
| 323 | |
/* edma hw specific data */
struct edma_hw {
	u32 __iomem *hw_addr;		/* inner register address */
	struct edma_adapter *adapter;	/* netdevice adapter */
	u32 rx_intr_mask;		/* rx interrupt mask */
	u32 tx_intr_mask;		/* tx interrupt mask */
	u32 misc_intr_mask;		/* misc interrupt mask */
	u32 wol_intr_mask;		/* wake on lan interrupt mask */
	bool intr_clear_type;		/* interrupt clear */
	bool intr_sw_idx_w;		/* interrupt software index */
	u32 rx_head_buff_size;		/* Rx buffer size */
	u8 rss_type;			/* rss protocol type */
};
| 337 | |
/* edma_sw_desc stores software descriptor
 * SW descriptor has 1:1 map with HW descriptor
 */
struct edma_sw_desc {
	struct sk_buff *skb;	/* skb owning the buffer, if any */
	dma_addr_t dma;		/* dma address */
	u16 length;		/* Tx/Rx buffer length */
	u32 flags;		/* EDMA_SW_DESC_FLAG_* bits */
};
| 347 | |
/* per core related information */
struct edma_per_cpu_queues_info {
	struct napi_struct napi;		/* napi associated with the core */
	u32 tx_mask;				/* tx interrupt mask */
	u32 rx_mask;				/* rx interrupt mask */
	u32 tx_status;				/* tx interrupt status */
	u32 rx_status;				/* rx interrupt status */
	u32 tx_comp_start;			/* tx completion queue start */
	u32 rx_start;				/* rx queue start */
	struct edma_common_info *edma_cinfo;	/* edma common info */
};
| 359 | |
| 360 | /* edma specific common info */ |
| 361 | struct edma_common_info { |
| 362 | struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */ |
| 363 | struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */ |
| 364 | struct platform_device *pdev; /* device structure */ |
| 365 | struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED]; |
| 366 | struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX]; |
| 367 | struct ctl_table_header *edma_ctl_table_hdr; |
| 368 | int num_gmac; |
| 369 | struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */ |
| 370 | u32 num_rx_queues; /* number of rx queue */ |
| 371 | u32 num_tx_queues; /* number of tx queue */ |
| 372 | u32 tx_irq[16]; /* number of tx irq */ |
| 373 | u32 rx_irq[8]; /* number of rx irq */ |
| 374 | u32 from_cpu; /* from CPU TPD field */ |
| 375 | u32 num_rxq_per_core; /* Rx queues per core */ |
| 376 | u32 num_txq_per_core; /* Tx queues per core */ |
| 377 | u16 tx_ring_count; /* Tx ring count */ |
| 378 | u16 rx_ring_count; /* Rx ring*/ |
| 379 | u16 rx_head_buffer_len; /* rx buffer length */ |
| 380 | u16 rx_page_buffer_len; /* rx buffer length */ |
| 381 | u32 page_mode; /* Jumbo frame supported flag */ |
| 382 | u32 fraglist_mode; /* fraglist supported flag */ |
| 383 | struct edma_hw hw; /* edma hw specific structure */ |
| 384 | struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */ |
| 385 | spinlock_t stats_lock; /* protect edma stats area for updation */ |
| 386 | }; |
| 387 | |
/* transmit packet descriptor (tpd) ring */
struct edma_tx_desc_ring {
	struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE];	/* Linux queue index */
	struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE];
				/* Array of netdevs associated with the tpd ring */
	void *hw_desc;			/* descriptor ring virtual address */
	struct edma_sw_desc *sw_desc;	/* buffer associated with ring */
	int netdev_bmp;			/* Bitmap for per-ring netdevs */
	u32 size;			/* descriptor ring length in bytes */
	u16 count;			/* number of descriptors in the ring */
	dma_addr_t dma;			/* descriptor ring physical address */
	u16 sw_next_to_fill;		/* next Tx descriptor to fill */
	u16 sw_next_to_clean;		/* next Tx descriptor to clean */
};
| 402 | |
/* receive free descriptor (rfd) ring */
struct edma_rfd_desc_ring {
	struct edma_rx_free_desc *hw_desc;	/* descriptor ring virtual address */
	struct edma_sw_desc *sw_desc;	/* buffer associated with ring */
	u16 size;			/* bytes allocated to sw_desc */
	u16 count;			/* number of descriptors in the ring */
	dma_addr_t dma;			/* descriptor ring physical address */
	u16 sw_next_to_fill;		/* next descriptor to fill */
	u16 sw_next_to_clean;		/* next descriptor to clean */
	u16 pending_fill;		/* fill pending from previous iteration */
};
| 414 | |
/* edma_rfs_filter_node - rfs filter node in hash table */
struct edma_rfs_filter_node {
	struct flow_keys keys;	/* dissected flow tuple used as hash key */
	u32 flow_id;		/* flow_id of filter provided by kernel */
	u16 filter_id;		/* filter id of filter returned by adaptor */
	u16 rq_id;		/* desired rq index */
	struct hlist_node node;	/* edma rfs list node */
};
| 423 | |
/* edma_rfs_flow_table - rfs flow table */
struct edma_rfs_flow_table {
	u16 max_num_filter;	/* Maximum number of filters edma supports */
	u16 hashtoclean;	/* hash table index to clean next */
	int filter_available;	/* Number of free filters available */
	struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES];	/* filter hash buckets */
	spinlock_t rfs_ftab_lock;	/* protects the filter table */
	struct timer_list expire_rfs;	/* timer function for edma_rps_may_expire_flow */
};
| 433 | |
/* EDMA net device structure — per-GMAC-port private data */
struct edma_adapter {
	struct net_device *netdev;		/* netdevice */
	struct platform_device *pdev;		/* platform device */
	struct edma_common_info *edma_cinfo;	/* edma common info */
	struct phy_device *phydev;		/* Phy device */
	struct edma_rfs_flow_table rfs;		/* edma rfs flow table */
	struct rtnl_link_stats64 stats;		/* netdev statistics */
#ifdef CONFIG_RFS_ACCEL
	set_rfs_filter_callback_t set_rfs_rule;	/* callback to program an RFS rule */
#endif
	u32 flags;		/* status flags */
	unsigned long state_flags;	/* GMAC up/down flags */
	u32 forced_speed;	/* link force speed */
	u32 forced_duplex;	/* link force duplex */
	u32 link_state;		/* phy link state */
	u32 phy_mdio_addr;	/* PHY device address on MII interface */
	u32 poll_required;	/* check if link polling is required */
	u32 poll_required_dynamic;	/* dynamic polling flag */
	u32 tx_start_offset[CONFIG_NR_CPUS];	/* tx queue start */
	u32 default_vlan_tag;	/* vlan tag */
	u32 dp_bitmap;		/* port bitmap for this adapter — presumably destination-port bits; confirm against caller */
	uint8_t phy_id[MII_BUS_ID_SIZE + 3];	/* PHY bus-id string ("bus:addr") */
	struct mutex poll_mutex;	/* Lock to protect polling flag change */
};
| 459 | |
/* PPPoE header info: PPPoE header immediately followed by the PPP protocol id */
struct pppoeh_proto {
	struct pppoe_hdr hdr;	/* PPPoE session header */
	__be16 proto;		/* PPP protocol field (e.g. IP/IPv6) */
};
| 465 | |
/* Queue/ring allocation and teardown */
int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo);
int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo);
int edma_open(struct net_device *netdev);
int edma_close(struct net_device *netdev);
void edma_free_tx_resources(struct edma_common_info *edma_c_info);
void edma_free_rx_resources(struct edma_common_info *edma_c_info);
int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo);
int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo);
void edma_free_tx_rings(struct edma_common_info *edma_cinfo);
void edma_free_rx_rings(struct edma_common_info *edma_cinfo);
void edma_free_queues(struct edma_common_info *edma_cinfo);

/* Interrupt control, reset, datapath entry points */
void edma_irq_disable(struct edma_common_info *edma_cinfo);
int edma_reset(struct edma_common_info *edma_cinfo);
int edma_poll(struct napi_struct *napi, int budget);
netdev_tx_t edma_xmit(struct sk_buff *skb,
		      struct net_device *netdev);
int edma_configure(struct edma_common_info *edma_cinfo);
void edma_irq_enable(struct edma_common_info *edma_cinfo);
void edma_enable_tx_ctrl(struct edma_hw *hw);
void edma_enable_rx_ctrl(struct edma_hw *hw);
void edma_stop_rx_tx(struct edma_hw *hw);
void edma_free_irqs(struct edma_adapter *adapter);
irqreturn_t edma_interrupt(int irq, void *dev);

/* Register access and netdev ops helpers */
void edma_write_reg(u16 reg_addr, u32 reg_value);
void edma_read_reg(u16 reg_addr, volatile u32 *reg_value);
struct rtnl_link_stats64 *edma_get_stats64(struct net_device *dev,
					   struct rtnl_link_stats64 *stats);
int edma_set_mac_addr(struct net_device *netdev, void *p);
int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq, u32 flow_id);
#ifdef CONFIG_RFS_ACCEL
int edma_register_rfs_filter(struct net_device *netdev,
			     set_rfs_filter_callback_t set_filter);
#endif
void edma_flow_may_expire(unsigned long data);
void edma_set_ethtool_ops(struct net_device *netdev);
int edma_change_mtu(struct net_device *netdev, int new_mtu);
void edma_set_stp_rstp(bool tag);
void edma_assign_ath_hdr_type(int tag);
int edma_get_default_vlan_tag(struct net_device *netdev);
void edma_adjust_link(struct net_device *netdev);
int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id);
u16 edma_select_xps_queue(struct net_device *dev, struct sk_buff *skb,
			  void *accel_priv, select_queue_fallback_t fallback);
void edma_read_append_stats(struct edma_common_info *edma_cinfo);

/* Interrupt coalescing tuning */
void edma_change_tx_coalesce(int usecs);
void edma_change_rx_coalesce(int usecs);
void edma_get_tx_rx_coalesce(u32 *reg_val);
void edma_clear_irq_status(void);

/* sysctl handlers for DSCP/AC mapping, precedence and IAD statistics */
int edma_dscp2ac_mapping_update(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos);
int edma_per_prec_stats_enable_handler(struct ctl_table *table, int write,
				       void __user *buffer, size_t *lenp,
				       loff_t *ppos);
int edma_prec_stats_reset_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);
int edma_iad_stats_enable_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);
int edma_iad_stats_reset_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);
int edma_print_flow_table_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);
int edma_max_valid_ifd_usec_handler(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos);
void edma_iad_process_flow(struct edma_common_info *edma_cinfo,
			   struct sk_buff *skb, u8 dir, u8 precedence);
| 539 | |
Rakesh Nair | 9bcf260 | 2017-01-06 16:02:16 +0530 | [diff] [blame] | 540 | #endif /* _EDMA_H_ */ |