/*
 * Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
15
16#ifndef _EDMA_H_
17#define _EDMA_H_
18
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/types.h>
22#include <linux/errno.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/io.h>
28#include <linux/vmalloc.h>
29#include <linux/pagemap.h>
30#include <linux/smp.h>
31#include <linux/platform_device.h>
32#include <linux/of.h>
33#include <linux/of_device.h>
34#include <linux/kernel.h>
35#include <linux/device.h>
36#include <linux/sysctl.h>
37#include <linux/phy.h>
38#include <linux/of_net.h>
39#include <net/checksum.h>
40#include <net/ip6_checksum.h>
41#include <asm-generic/bug.h>
42#include <linux/version.h>
Rakesh Nair7e053532017-08-18 17:53:25 +053043#include <linux/ppp_defs.h>
44#include <linux/if_pppox.h>
Rakesh Nair9bcf2602017-01-06 16:02:16 +053045#include "ess_edma.h"
46
/* Core/port topology limits */
#define EDMA_CPU_CORES_SUPPORTED 4
#define EDMA_MAX_PORTID_SUPPORTED 5
#define EDMA_MAX_VLAN_SUPPORTED EDMA_MAX_PORTID_SUPPORTED
#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1)
#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */
#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 Netdev per queue, 1 space for indexing */

/* Total hardware queue counts */
#define EDMA_MAX_RECEIVE_QUEUE 8
#define EDMA_MAX_TRANSMIT_QUEUE 16

/* WAN/LAN adapter number */
#define EDMA_WAN 0
#define EDMA_LAN 1

/* Default VLAN tag per adapter */
#define EDMA_LAN_DEFAULT_VLAN 1
#define EDMA_WAN_DEFAULT_VLAN 2

/* Default VLAN tag per port group */
#define EDMA_DEFAULT_GROUP1_VLAN 2
#define EDMA_DEFAULT_GROUP2_VLAN 1
#define EDMA_DEFAULT_GROUP3_VLAN 3
#define EDMA_DEFAULT_GROUP4_VLAN 4
#define EDMA_DEFAULT_GROUP5_VLAN 5

/* Default port bitmaps for the first two groups */
#define EDMA_DEFAULT_GROUP1_BMP 0x20
#define EDMA_DEFAULT_GROUP2_BMP 0x1e

/* RSS (receive side scaling) control values */
#define EDMA_DEFAULT_DISABLE_RSS 0
#define EDMA_RSS_DISABLE 1
#define EDMA_RSS_ENABLE 0

/* Queues exposed to linux kernel */
#define EDMA_NETDEV_TX_QUEUE 4
#define EDMA_NETDEV_RX_QUEUE 4

/* Number of queues per core */
#define EDMA_NUM_TXQ_PER_CORE 4
#define EDMA_NUM_RXQ_PER_CORE 2

#define EDMA_TPD_EOP_SHIFT 31

#define EDMA_PORT_ID_SHIFT 12
#define EDMA_PORT_ID_MASK 0x7

/* tpd word 3 bit 18-28 */
#define EDMA_TPD_PORT_BITMAP_SHIFT 18

/* tpd word 3 bit 29-31 */
#define EDMA_TPD_PRIO_SHIFT 29

#define EDMA_TPD_FROM_CPU_SHIFT 25

#define EDMA_FROM_CPU_MASK 0x80
#define EDMA_SKB_PRIORITY_MASK 0x38
101
/* TX/RX descriptor ring count */
/* should be a power of 2 */
#define EDMA_RX_RING_SIZE 512
#define EDMA_TX_RING_SIZE 512

/* Rx head buffer sizes used in paged/non paged mode */
#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256
#define EDMA_RX_HEAD_BUFF_SIZE 1540

/* MAX frame size supported by switch */
#define EDMA_MAX_JUMBO_FRAME_SIZE 9216

/* Hardware configuration defaults: interrupt moderation timers (IMT),
 * burst sizes and FIFO thresholds programmed into the EDMA engine.
 */
#define EDMA_INTR_CLEAR_TYPE 0
#define EDMA_INTR_SW_IDX_W_TYPE 0
#define EDMA_FIFO_THRESH_TYPE 0
#define EDMA_RSS_TYPE 0
#define EDMA_RX_IMT 0x0020
#define EDMA_TX_IMT 0x0050
#define EDMA_TPD_BURST 5
#define EDMA_TXF_BURST 0x100
#define EDMA_RFD_BURST 8
#define EDMA_RFD_THR 16
#define EDMA_RFD_LTHR 0

/* RX/TX per CPU based mask/shift */
#define EDMA_TX_PER_CPU_MASK 0xF
#define EDMA_RX_PER_CPU_MASK 0x3
#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2
#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1
#define EDMA_TX_CPU_START_SHIFT 0x2
#define EDMA_RX_CPU_START_SHIFT 0x1
134
/* Flags used in transmit direction */
#define EDMA_HW_CHECKSUM 0x00000001
#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002
#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004

/* Software descriptor state flags (edma_sw_desc.flags) */
#define EDMA_SW_DESC_FLAG_LAST 0x1
#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2
#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4
#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8
#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10
#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20


/* One extra slot beyond kernel MAX_SKB_FRAGS for the skb head */
#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1)

/* Ethtool specific list of EDMA supported features */
#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \
				| SUPPORTED_10baseT_Full \
				| SUPPORTED_100baseT_Half \
				| SUPPORTED_100baseT_Full \
				| SUPPORTED_1000baseT_Full)
156
/* Receive side atheros Header */
#define EDMA_RX_ATH_HDR_VERSION 0x2
#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14
#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11
#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6
#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4

/* Transmit side atheros Header */
#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F
#define EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80
#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7

/* First TX queue owned by each core (4 queues per core) */
#define EDMA_TXQ_START_CORE0 8
#define EDMA_TXQ_START_CORE1 12
#define EDMA_TXQ_START_CORE2 0
#define EDMA_TXQ_START_CORE3 4

/* Per-core TX queue interrupt masks, matching the start/count above */
#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00
#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000
#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F
#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0

#define EDMA_ETH_HDR_LEN 12
#define EDMA_ETH_TYPE_MASK 0xFFFF

#define EDMA_RX_BUFFER_WRITE 16
#define EDMA_RFD_AVAIL_THR 80

/* Sentinel PHY address for GMACs with no MDIO-attached PHY */
#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR

#define EDMA_PRECEDENCE_MAX 8

#define EDMA_AC_BK 0 /* Access Category: Background */
#define EDMA_AC_BE 1 /* Access Category: Best Effort */
#define EDMA_AC_VI 2 /* Access Category: Video */
#define EDMA_AC_VO 3 /* Access Category: Voice */
#define EDMA_AC_MAX 4

#define EDMA_DSCP2AC_INPUT_PARAMS_MAX 2

/* Flow direction for inter-arrival delay (IAD) statistics */
#define EDMA_INGRESS_DIR 0
#define EDMA_EGRESS_DIR 1
#define EDMA_MAX_IAD_FLOW_STATS_SUPPORTED 8
200
/* Provided by the SSDK: install/remove an RFS IP connection-tracking rule
 * steering the (src, dst, sport, dport, proto) tuple to a load-balance queue.
 */
extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst,
				  __be16 sport, __be16 dport,
				  uint8_t proto, u16 loadbalance, bool action);
Rakesh Nair03824d52017-07-31 17:10:49 +0530204
/* Per-queue packet/byte counters and flow statistics exported via ethtool */
struct edma_ethtool_statistics {
	/* Per-TX-queue packet counters (16 queues) */
	u64 tx_q0_pkt;
	u64 tx_q1_pkt;
	u64 tx_q2_pkt;
	u64 tx_q3_pkt;
	u64 tx_q4_pkt;
	u64 tx_q5_pkt;
	u64 tx_q6_pkt;
	u64 tx_q7_pkt;
	u64 tx_q8_pkt;
	u64 tx_q9_pkt;
	u64 tx_q10_pkt;
	u64 tx_q11_pkt;
	u64 tx_q12_pkt;
	u64 tx_q13_pkt;
	u64 tx_q14_pkt;
	u64 tx_q15_pkt;
	/* Per-TX-queue byte counters (16 queues) */
	u64 tx_q0_byte;
	u64 tx_q1_byte;
	u64 tx_q2_byte;
	u64 tx_q3_byte;
	u64 tx_q4_byte;
	u64 tx_q5_byte;
	u64 tx_q6_byte;
	u64 tx_q7_byte;
	u64 tx_q8_byte;
	u64 tx_q9_byte;
	u64 tx_q10_byte;
	u64 tx_q11_byte;
	u64 tx_q12_byte;
	u64 tx_q13_byte;
	u64 tx_q14_byte;
	u64 tx_q15_byte;
	/* Per-RX-queue packet counters (8 queues) */
	u64 rx_q0_pkt;
	u64 rx_q1_pkt;
	u64 rx_q2_pkt;
	u64 rx_q3_pkt;
	u64 rx_q4_pkt;
	u64 rx_q5_pkt;
	u64 rx_q6_pkt;
	u64 rx_q7_pkt;
	/* Per-RX-queue byte counters (8 queues) */
	u64 rx_q0_byte;
	u64 rx_q1_byte;
	u64 rx_q2_byte;
	u64 rx_q3_byte;
	u64 rx_q4_byte;
	u64 rx_q5_byte;
	u64 rx_q6_byte;
	u64 rx_q7_byte;
	u64 tx_desc_error;			/* TX descriptor errors */
	u64 rx_alloc_fail_ctr;			/* RX buffer allocation failures */
	u64 tx_prec[EDMA_PRECEDENCE_MAX];	/* TX packets per precedence */
	u64 rx_prec[EDMA_PRECEDENCE_MAX];	/* RX packets per precedence */
	u64 rx_ac[EDMA_AC_MAX];			/* RX packets per access category */
	u64 tx_ac[EDMA_AC_MAX];			/* TX packets per access category */
	/* Per-flow inter-arrival delay (IAD) stats, RX and TX directions */
	u64 rx_flow_iad[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];
	u64 rx_flow_delta_start_ts[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];
	u64 tx_flow_iad[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];
	u64 tx_flow_delta_start_ts[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];
};
265
/*
 * struct edma_video_delay_stats
 *	edma video delay statistics
 */
struct edma_video_delay_stats
{
	uint64_t start_ts; /* tick value when first frame is received for a particular group */
	uint64_t max_interframe_delay; /* maximum interframe delay */
	uint64_t max_delay; /* ticks value in absolute terms when the max_interframe delay is observed */
	uint64_t last_frame_ts; /* last frame tick value */
};
277
/*
 * struct edma_flow_attrib
 *	per-flow table for dest-addr/dest-port info
 */
struct edma_flow_attrib {
	u16 dport;	/* destination port */
	u16 sport;	/* source port */
	u32 saddr;	/* source IP address */
	u32 daddr;	/* destination IP address */
	u8 ip_version;	/* IP version of the flow */
};
289
/* Per-MDIO-bus private data */
struct edma_mdio_data {
	struct mii_bus *mii_bus;	/* registered MII bus */
	void __iomem *membase;		/* mapped MDIO register base */
	int phy_irq[PHY_MAX_ADDR];	/* per-address PHY interrupt numbers */
};
295
/* EDMA LINK state */
enum edma_link_state {
	__EDMA_LINKUP, /* Indicate link is UP */
	__EDMA_LINKDOWN /* Indicate link is down */
};
301
/* EDMA GMAC state */
enum edma_gmac_state {
	__EDMA_UP /* use to indicate GMAC is up */
};
306
/* edma transmit descriptor (TPD) - hardware layout, little-endian fields */
struct edma_tx_desc {
	__le16 len; /* full packet including CRC */
	__le16 svlan_tag; /* vlan tag */
	__le32 word1; /* byte 4-7 */
	__le32 addr; /* address of buffer */
	__le32 word3; /* byte 12 */
};
315
/* edma receive return descriptor (RRD) - eight 16-bit hardware words */
struct edma_rx_return_desc {
	u16 rrd0;
	u16 rrd1;
	u16 rrd2;
	u16 rrd3;
	u16 rrd4;
	u16 rrd5;
	u16 rrd6;
	u16 rrd7;
};
327
/* RFD (receive free) descriptor - hardware layout */
struct edma_rx_free_desc {
	__le32 buffer_addr; /* buffer address */
};
332
/* edma hw specific data */
struct edma_hw {
	u32 __iomem *hw_addr; /* inner register address */
	struct edma_adapter *adapter; /* netdevice adapter */
	u32 rx_intr_mask; /* rx interrupt mask */
	u32 tx_intr_mask; /* tx interrupt mask */
	u32 misc_intr_mask; /* misc interrupt mask */
	u32 wol_intr_mask; /* wake on lan interrupt mask */
	bool intr_clear_type; /* interrupt clear */
	bool intr_sw_idx_w; /* interrupt software index */
	u32 rx_head_buff_size; /* Rx buffer size */
	u8 rss_type; /* rss protocol type */
};
346
/* edma_sw_desc stores software descriptor
 * SW descriptor has 1:1 map with HW descriptor
 */
struct edma_sw_desc {
	struct sk_buff *skb;	/* skb associated with this descriptor */
	dma_addr_t dma; /* dma address */
	u16 length; /* Tx/Rx buffer length */
	u32 flags;	/* EDMA_SW_DESC_FLAG_* state bits */
};
356
/* per core related information */
struct edma_per_cpu_queues_info {
	struct napi_struct napi; /* napi associated with the core */
	u32 tx_mask; /* tx interrupt mask */
	u32 rx_mask; /* rx interrupt mask */
	u32 tx_status; /* tx interrupt status */
	u32 rx_status; /* rx interrupt status */
	u32 tx_start; /* tx queue start */
	u32 rx_start; /* rx queue start */
	struct edma_common_info *edma_cinfo; /* edma common info */
};
368
/* edma specific common info, shared by all GMAC adapters on the engine */
struct edma_common_info {
	struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */
	struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */
	struct platform_device *pdev; /* device structure */
	struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED];
	struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX];
	struct ctl_table_header *edma_ctl_table_hdr;	/* sysctl table header */
	int num_gmac;	/* number of GMAC interfaces registered */
	struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */
	u32 num_rx_queues; /* number of rx queue */
	u32 num_tx_queues; /* number of tx queue */
	u32 tx_irq[16]; /* number of tx irq */
	u32 rx_irq[8]; /* number of rx irq */
	u32 from_cpu; /* from CPU TPD field */
	u32 num_rxq_per_core; /* Rx queues per core */
	u32 num_txq_per_core; /* Tx queues per core */
	u16 tx_ring_count; /* Tx ring count */
	u16 rx_ring_count; /* Rx ring count */
	u16 rx_head_buffer_len; /* rx buffer length */
	u16 rx_page_buffer_len; /* rx buffer length */
	u32 page_mode; /* Jumbo frame supported flag */
	u32 fraglist_mode; /* fraglist supported flag */
	struct edma_hw hw; /* edma hw specific structure */
	struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */
	spinlock_t stats_lock; /* protect edma stats area for update */
	u32 num_cores;	/* number of CPU cores in use */
};
397
/* transmit packet descriptor (tpd) ring */
struct edma_tx_desc_ring {
	struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */
	struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE];
			/* Array of netdevs associated with the tpd ring */
	void *hw_desc; /* descriptor ring virtual address */
	struct edma_sw_desc *sw_desc; /* buffer associated with ring */
	int netdev_bmp; /* Bitmap for per-ring netdevs */
	u32 size; /* descriptor ring length in bytes */
	u16 count; /* number of descriptors in the ring */
	dma_addr_t dma; /* descriptor ring physical address */
	u16 sw_next_to_fill; /* next Tx descriptor to fill */
	u16 sw_next_to_clean; /* next Tx descriptor to clean */
};
412
/* receive free descriptor (rfd) ring */
struct edma_rfd_desc_ring {
	struct edma_rx_free_desc *hw_desc; /* descriptor ring virtual address */
	struct edma_sw_desc *sw_desc; /* buffer associated with ring */
	u16 size; /* bytes allocated to sw_desc */
	u16 count; /* number of descriptors in the ring */
	dma_addr_t dma; /* descriptor ring physical address */
	u16 sw_next_to_fill; /* next descriptor to fill */
	u16 sw_next_to_clean; /* next descriptor to clean */
	u16 pending_fill; /* fill pending from previous iteration */
};
424
/* edma_rfs_filter_node - rfs filter node in hash table */
struct edma_rfs_filter_node {
	struct flow_keys keys;	/* dissected flow tuple used as hash key */
	u32 flow_id; /* flow_id of filter provided by kernel */
	u16 filter_id; /* filter id of filter returned by adaptor */
	u16 rq_id; /* desired rq index */
	struct hlist_node node; /* edma rfs list node */
};
433
/* edma_rfs_flow_table - rfs flow table */
struct edma_rfs_flow_table {
	u16 max_num_filter; /* Maximum number of filters edma supports */
	u16 hashtoclean; /* hash table index to clean next */
	int filter_available; /* Number of free filters available */
	struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES];
	spinlock_t rfs_ftab_lock;	/* protects the flow table */
	struct timer_list expire_rfs; /* timer function for edma_rps_may_expire_flow */
};
443
/* EDMA net device structure - per-GMAC private data */
struct edma_adapter {
	struct net_device *netdev; /* netdevice */
	struct platform_device *pdev; /* platform device */
	struct edma_common_info *edma_cinfo; /* edma common info */
	struct phy_device *phydev; /* Phy device */
	struct edma_rfs_flow_table rfs; /* edma rfs flow table */
	struct rtnl_link_stats64 stats; /* netdev statistics */
#ifdef CONFIG_RFS_ACCEL
	set_rfs_filter_callback_t set_rfs_rule;	/* callback to program RFS rules */
#endif
	u32 flags;/* status flags */
	unsigned long state_flags; /* GMAC up/down flags */
	u32 forced_speed; /* link force speed */
	u32 forced_duplex; /* link force duplex */
	u32 link_state; /* phy link state */
	u32 phy_mdio_addr; /* PHY device address on MII interface */
	u32 poll_required; /* check if link polling is required */
	u32 poll_required_dynamic; /* dynamic polling flag */
	u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */
	u32 default_vlan_tag; /* vlan tag */
	u32 dp_bitmap;	/* destination port bitmap for this adapter */
	uint8_t phy_id[MII_BUS_ID_SIZE + 3];	/* PHY bus id string */
	struct mutex poll_mutex; /* Lock to protect polling flag change */
};
469
/* PPPoE header info: PPPoE header followed by the PPP protocol field */
struct pppoeh_proto {
	struct pppoe_hdr hdr;
	__be16 proto;
};
475
/* Queue/ring allocation and teardown */
int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo);
int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo);
int edma_open(struct net_device *netdev);
int edma_close(struct net_device *netdev);
void edma_free_tx_resources(struct edma_common_info *edma_c_info);
void edma_free_rx_resources(struct edma_common_info *edma_c_info);
int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo);
int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo);
void edma_free_tx_rings(struct edma_common_info *edma_cinfo);
void edma_free_rx_rings(struct edma_common_info *edma_cinfo);
void edma_free_queues(struct edma_common_info *edma_cinfo);

/* Interrupt handling and datapath */
void edma_irq_disable(struct edma_common_info *edma_cinfo);
int edma_reset(struct edma_common_info *edma_cinfo);
int edma_poll(struct napi_struct *napi, int budget);
netdev_tx_t edma_xmit(struct sk_buff *skb,
		      struct net_device *netdev);
int edma_configure(struct edma_common_info *edma_cinfo);
void edma_irq_enable(struct edma_common_info *edma_cinfo);
void edma_enable_tx_ctrl(struct edma_hw *hw);
void edma_enable_rx_ctrl(struct edma_hw *hw);
void edma_stop_rx_tx(struct edma_hw *hw);
void edma_free_irqs(struct edma_adapter *adapter);
irqreturn_t edma_interrupt(int irq, void *dev);

/* Register access and statistics */
void edma_write_reg(u16 reg_addr, u32 reg_value);
void edma_read_reg(u16 reg_addr, volatile u32 *reg_value);
struct rtnl_link_stats64 *edma_get_stats64(struct net_device *dev,
					   struct rtnl_link_stats64 *stats);
int edma_set_mac_addr(struct net_device *netdev, void *p);

/* RFS (receive flow steering) hooks */
int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq, u32 flow_id);
#ifdef CONFIG_RFS_ACCEL
int edma_register_rfs_filter(struct net_device *netdev,
			     set_rfs_filter_callback_t set_filter);
#endif
void edma_flow_may_expire(unsigned long data);

/* Netdev/ethtool configuration helpers */
void edma_set_ethtool_ops(struct net_device *netdev);
int edma_change_mtu(struct net_device *netdev, int new_mtu);
void edma_set_stp_rstp(bool tag);
void edma_assign_ath_hdr_type(int tag);
int edma_get_default_vlan_tag(struct net_device *netdev);
void edma_adjust_link(struct net_device *netdev);
int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id);
u16 edma_select_xps_queue(struct net_device *dev, struct sk_buff *skb,
			  void *accel_priv, select_queue_fallback_t fallback);
void edma_read_append_stats(struct edma_common_info *edma_cinfo);
void edma_change_tx_coalesce(int usecs);
void edma_change_rx_coalesce(int usecs);
void edma_get_tx_rx_coalesce(u32 *reg_val);
void edma_clear_irq_status(void);

/* sysctl handlers for DSCP/precedence and IAD flow statistics */
int edma_dscp2ac_mapping_update(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos);
int edma_per_prec_stats_enable_handler(struct ctl_table *table, int write,
				       void __user *buffer, size_t *lenp,
				       loff_t *ppos);
int edma_prec_stats_reset_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);
int edma_iad_stats_enable_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);
int edma_iad_stats_reset_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);
int edma_print_flow_table_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);
int edma_max_valid_ifd_usec_handler(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos);
void edma_iad_process_flow(struct edma_common_info *edma_cinfo,
			   struct sk_buff *skb, u8 dir, u8 precedence);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530550#endif /* _EDMA_H_ */