/*
 * Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/cpu_rmap.h>
#include <linux/of_net.h>
#include <linux/timer.h>
#include <linux/bitmap.h>
#include "edma.h"
#include "ess_edma.h"

/* Weight round robin and virtual QID mask */
#define EDMA_WRR_VID_SCTL_MASK 0xffff

/* Weight round robin and virtual QID shift */
#define EDMA_WRR_VID_SCTL_SHIFT 16

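/* edma_idt_tbl
 * Default RSS indirection tables, one row per supported number of cores.
 * Each 32-bit entry packs eight 4-bit RX queue numbers, so the 16 entries
 * of a row describe the 128 hash-to-queue mappings programmed into the
 * EDMA_REG_RSS_IDT registers in edma_axi_probe().
 */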
static const u32 edma_idt_tbl[EDMA_CPU_CORES_SUPPORTED][EDMA_NUM_IDT] = {

	/* For 1 core */
	{
		0x0, 0x0, 0x0, 0x0,
		0x0, 0x0, 0x0, 0x0,
		0x0, 0x0, 0x0, 0x0,
		0x0, 0x0, 0x0, 0x0
	},

	/* For 2 cores */
	{
		0x20202020, 0x20202020, 0x20202020, 0x20202020,
		0x20202020, 0x20202020, 0x20202020, 0x20202020,
		0x20202020, 0x20202020, 0x20202020, 0x20202020,
		0x20202020, 0x20202020, 0x20202020, 0x20202020
	},

	/* For 3 cores */
	{
		0x20420420, 0x04204204, 0x42042042, 0x20420420,
		0x04204204, 0x42042042, 0x20420420, 0x04204204,
		0x42042042, 0x20420420, 0x04204204, 0x42042042,
		0x20420420, 0x04204204, 0x42042042, 0x20420420
	},

	/* For 4 cores */
	{
		0x64206420, 0x64206420, 0x64206420, 0x64206420,
		0x64206420, 0x64206420, 0x64206420, 0x64206420,
		0x64206420, 0x64206420, 0x64206420, 0x64206420,
		0x64206420, 0x64206420, 0x64206420, 0x64206420
	}
};

char edma_axi_driver_name[] = "ess_edma";
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

static u32 edma_hw_addr;

struct timer_list edma_stats_timer;
static struct mii_bus *miibus_gb;

char edma_tx_irq[16][64];
char edma_rx_irq[8][64];
struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];

extern u8 edma_dscp2ac_tbl[EDMA_PRECEDENCE_MAX];
extern u32 edma_per_prec_stats_enable;
extern u32 edma_prec_stats_reset;
extern u32 edma_iad_stats_enable;
extern u32 edma_iad_stats_reset;
extern u32 edma_max_valid_ifd_usec;
static u32 edma_print_flow_table __read_mostly;
extern struct edma_flow_attrib edma_flow_tbl[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];

static struct phy_device *edma_phydev[EDMA_MAX_PORTID_SUPPORTED];
static int edma_link_detect_bmp;
static int phy_dev_state[EDMA_MAX_PORTID_SUPPORTED];
static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
			EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
			EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};

static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;

static u32 edma_default_group1_bmp __read_mostly = EDMA_DEFAULT_GROUP1_BMP;
static u32 edma_default_group2_bmp __read_mostly = EDMA_DEFAULT_GROUP2_BMP;
static u32 edma_disable_rss __read_mostly = EDMA_DEFAULT_DISABLE_RSS;

u32 edma_disable_queue_stop __read_mostly = EDMA_DEFAULT_DISABLE_QUEUE_STOP;

static int edma_weight_assigned_to_q __read_mostly;
static int edma_queue_to_virtual_q __read_mostly;
static bool edma_enable_rstp __read_mostly;
static int edma_athr_hdr_eth_type __read_mostly;

static int page_mode;
module_param(page_mode, int, 0);
MODULE_PARM_DESC(page_mode, "enable page mode");

static int overwrite_mode;
module_param(overwrite_mode, int, 0);
MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");

static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
module_param(jumbo_mru, int, 0);
MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");

static int num_rxq = 4;
module_param(num_rxq, int, 0);
MODULE_PARM_DESC(num_rxq, "change the number of rx queues");

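/* edma_write_reg()
 * Write a 32-bit value to the EDMA register at the given offset
 */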
void edma_write_reg(u16 reg_addr, u32 reg_value)
{
	writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
}

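/* edma_read_reg()
 * Read a 32-bit value from the EDMA register at the given offset
 */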
void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
}

/* edma_change_tx_coalesce()
 * change tx interrupt moderation timer
 */
void edma_change_tx_coalesce(int usecs)
{
	u32 reg_value;

	/* The user-supplied value is right-shifted by 1 because the IMT
	 * timer resolution is 2 usecs: one count of this register
	 * corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}

/* edma_change_rx_coalesce()
 * change rx interrupt moderation timer
 */
void edma_change_rx_coalesce(int usecs)
{
	u32 reg_value;

	/* The user-supplied value is right-shifted by 1 because the IMT
	 * timer resolution is 2 usecs: one count of this register
	 * corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}

/* edma_get_tx_rx_coalesce()
 * Get tx/rx interrupt moderation value
 */
void edma_get_tx_rx_coalesce(u32 *reg_val)
{
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
}

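/* edma_read_append_stats()
 * Read the per-queue packet and byte counters from hardware and
 * accumulate them into the driver's cumulative statistics
 */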
void edma_read_append_stats(struct edma_common_info *edma_cinfo)
{
	u64 *p;
	int i;
	u32 stat;

	spin_lock(&edma_cinfo->stats_lock);
	p = (u64 *)&(edma_cinfo->edma_ethstats);

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	spin_unlock(&edma_cinfo->stats_lock);
}

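/* edma_statistics_timer()
 * Timer callback that harvests the hardware statistics once per second
 */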
static void edma_statistics_timer(unsigned long data)
{
	struct edma_common_info *edma_cinfo = (struct edma_common_info *)data;

	edma_read_append_stats(edma_cinfo);

	mod_timer(&edma_stats_timer, jiffies + 1*HZ);
}

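/* edma_enable_stp_rstp()
 * sysctl handler that propagates the STP/RSTP enable flag to the EDMA core
 */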
static int edma_enable_stp_rstp(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_set_stp_rstp(edma_enable_rstp);

	return ret;
}

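/* edma_ath_hdr_eth_type()
 * sysctl handler that propagates the Atheros header ethertype to the
 * EDMA core
 */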
static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);

	return ret;
}

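/* edma_get_port_from_phydev()
 * Return the port number backing the given PHY device, or -1 if the
 * PHY is unknown
 */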
static int edma_get_port_from_phydev(struct phy_device *phydev)
{
	int i;

	for (i = 0; i < EDMA_MAX_PORTID_SUPPORTED; i++) {
		if (phydev == edma_phydev[i])
			return i;
	}

	pr_err("Invalid PHY device\n");
	return -1;
}

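/* edma_is_port_used()
 * Return 1 if the given port is already used for link detection
 */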
static int edma_is_port_used(int portid)
{
	int used_portid_bmp;

	used_portid_bmp = edma_link_detect_bmp >> 1;

	while (used_portid_bmp) {
		int port_bit_set = ffs(used_portid_bmp);

		if (port_bit_set == portid)
			return 1;
		used_portid_bmp &= ~(1 << (port_bit_set - 1));
	}

	return 0;
}

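/* edma_change_default_lan_vlan()
 * sysctl handler to update the default VLAN tag of the LAN netdevice
 */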
static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for default_lan does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[1]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_ltag;

	return ret;
}

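/* edma_change_default_wan_vlan()
 * sysctl handler to update the default VLAN tag of the WAN netdevice
 */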
static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for default_wan does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_wtag;

	return ret;
}

static int edma_change_group1_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for Group 1 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group1_vtag;

	return ret;
}

static int edma_change_group2_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for Group 2 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[1]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group2_vtag;

	return ret;
}

static int edma_change_group3_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[2]) {
		pr_err("Netdevice for Group 3 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[2]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group3_vtag;

	return ret;
}

static int edma_change_group4_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[3]) {
		pr_err("Netdevice for Group 4 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[3]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group4_vtag;

	return ret;
}

static int edma_change_group5_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[4]) {
		pr_err("Netdevice for Group 5 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[4]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group5_vtag;

	return ret;
}

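/* edma_change_group1_bmp()
 * sysctl handler invoked when the port bitmap of group 1 changes:
 * rebinds the PHY, updates the port-to-netdev lookup table and
 * enables or disables link detection accordingly
 */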
static int edma_change_group1_bmp(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	struct net_device *ndev;
	struct phy_device *phydev;
	int ret, num_ports_enabled;
	u32 portid_bmp, port_bit, prev_bmp, port_id;

	ndev = edma_netdev[0];
	if (!ndev) {
		pr_err("Netdevice for Group 1 does not exist\n");
		return -1;
	}

	prev_bmp = edma_default_group1_bmp;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if ((!write) || (prev_bmp == edma_default_group1_bmp))
		return ret;

	adapter = netdev_priv(ndev);
	edma_cinfo = adapter->edma_cinfo;

	/* We ignore the bit for CPU Port */
	portid_bmp = edma_default_group1_bmp >> 1;
	port_bit = ffs(portid_bmp);
	if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
		return -1;

	/* If this group has no ports,
	 * we disable polling for the adapter, stop the queues and return
	 */
	if (!port_bit) {
		adapter->dp_bitmap = edma_default_group1_bmp;
		if (adapter->poll_required) {
			mutex_lock(&adapter->poll_mutex);
			adapter->poll_required = 0;
			mutex_unlock(&adapter->poll_mutex);
			adapter->link_state = __EDMA_LINKDOWN;
			netif_carrier_off(ndev);
			netif_tx_stop_all_queues(ndev);
		}
		return 0;
	}

	/* Our array indexes are for 5 ports (0 - 4) */
	port_bit--;
	edma_link_detect_bmp = 0;

	/* Do we have more ports in this group */
	num_ports_enabled = bitmap_weight((unsigned long *)&portid_bmp, 32);

	/* If this group has more than one port,
	 * we disable polling for the adapter as link detection
	 * should be disabled, stop the phy state machine of previous
	 * phy adapter attached to group and start the queues
	 */
	if (num_ports_enabled > 1) {
		mutex_lock(&adapter->poll_mutex);
		if (adapter->poll_required) {
			adapter->poll_required = 0;
			if (adapter->phydev) {
				port_id = edma_get_port_from_phydev(
						adapter->phydev);

				/* We check if phydev attached to this group is
				 * already started and if yes, we stop
				 * the state machine for the phy
				 */
				if (phy_dev_state[port_id]) {
					phy_stop_machine(adapter->phydev);
					phy_dev_state[port_id] = 0;
				}

				adapter->phydev = NULL;
			}

			/* Start the tx queues for this netdev
			 * with link detection disabled
			 */
			if (adapter->link_state == __EDMA_LINKDOWN) {
				adapter->link_state = __EDMA_LINKUP;
				netif_tx_start_all_queues(ndev);
				netif_carrier_on(ndev);
			}
		}
		mutex_unlock(&adapter->poll_mutex);

		goto set_bitmap;
	}

	mutex_lock(&adapter->poll_mutex);
	adapter->poll_required = adapter->poll_required_dynamic;
	mutex_unlock(&adapter->poll_mutex);

	if (!adapter->poll_required)
		goto set_bitmap;

	phydev = adapter->phydev;

	/* If this group has only one port,
	 * if phydev exists we start the phy state machine
	 * and if it doesn't we create a phydev and start it.
	 */
	if (edma_phydev[port_bit]) {
		adapter->phydev = edma_phydev[port_bit];
		set_bit(port_bit, (unsigned long *)&edma_link_detect_bmp);

		/* If the Phy device has changed group,
		 * we need to reassign the netdev
		 */
		if (adapter->phydev->attached_dev != ndev)
			adapter->phydev->attached_dev = ndev;

		if (!phy_dev_state[port_bit]) {
			phy_start_machine(adapter->phydev);
			phy_dev_state[port_bit] = 1;
		}
	} else {
		snprintf(adapter->phy_id,
			 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
			 miibus_gb->id,
			 port_bit);

		adapter->phydev = phy_connect(ndev,
					      (const char *)adapter->phy_id,
					      &edma_adjust_link,
					      PHY_INTERFACE_MODE_SGMII);

		if (IS_ERR(adapter->phydev)) {
			adapter->phydev = phydev;
			pr_err("PHY attach FAIL for port %d", port_bit);
			return -1;
		}

		if (adapter->phydev->attached_dev != ndev)
			adapter->phydev->attached_dev = ndev;

		edma_phydev[port_bit] = adapter->phydev;
		phy_dev_state[port_bit] = 1;
		set_bit(port_bit, (unsigned long *)&edma_link_detect_bmp);
		adapter->phydev->advertising |=
					(ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause);
		adapter->phydev->supported |=
					(SUPPORTED_Pause |
					 SUPPORTED_Asym_Pause);
		phy_start(adapter->phydev);
		phy_start_aneg(adapter->phydev);
	}

	/* We check if this phydev is in use by other Groups
	 * and stop phy machine only if it is not stopped
	 */
	if (phydev) {
		port_id = edma_get_port_from_phydev(phydev);
		if (phy_dev_state[port_id]) {
			phy_stop_machine(phydev);
			phy_dev_state[port_id] = 0;
		}
	}

	mutex_lock(&adapter->poll_mutex);
	adapter->poll_required = 1;
	mutex_unlock(&adapter->poll_mutex);
	adapter->link_state = __EDMA_LINKDOWN;

set_bitmap:
	while (portid_bmp) {
		int port_bit_set = ffs(portid_bmp);

		edma_cinfo->portid_netdev_lookup_tbl[port_bit_set] = ndev;
		portid_bmp &= ~(1 << (port_bit_set - 1));
	}

	adapter->dp_bitmap = edma_default_group1_bmp;

	return 0;
}

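/* edma_change_group2_bmp()
 * sysctl handler invoked when the port bitmap of group 2 changes;
 * same handling as edma_change_group1_bmp(), except that a PHY still
 * used by another group is left running
 */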
static int edma_change_group2_bmp(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	struct net_device *ndev;
	struct phy_device *phydev;
	int ret;
	u32 prev_bmp, portid_bmp, port_bit, num_ports_enabled, port_id;

	ndev = edma_netdev[1];
	if (!ndev) {
		pr_err("Netdevice for Group 2 does not exist\n");
		return -1;
	}

	prev_bmp = edma_default_group2_bmp;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if ((!write) || (prev_bmp == edma_default_group2_bmp))
		return ret;

	adapter = netdev_priv(ndev);
	edma_cinfo = adapter->edma_cinfo;

	/* We ignore the bit for CPU Port */
	portid_bmp = edma_default_group2_bmp >> 1;
	port_bit = ffs(portid_bmp);
	if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
		return -1;

	/* If this group has no ports,
	 * we disable polling for the adapter, stop the queues and return
	 */
	if (!port_bit) {
		adapter->dp_bitmap = edma_default_group2_bmp;
		if (adapter->poll_required) {
			mutex_lock(&adapter->poll_mutex);
			adapter->poll_required = 0;
			mutex_unlock(&adapter->poll_mutex);
			adapter->link_state = __EDMA_LINKDOWN;
			netif_carrier_off(ndev);
			netif_tx_stop_all_queues(ndev);
		}
		return 0;
	}

	/* Our array indexes are for 5 ports (0 - 4) */
	port_bit--;

	/* Do we have more ports in this group */
	num_ports_enabled = bitmap_weight((unsigned long *)&portid_bmp, 32);

	/* If this group has more than one port,
	 * we disable polling for the adapter as link detection
	 * should be disabled, stop the phy state machine of previous
	 * phy adapter attached to group and start the queues
	 */
	if (num_ports_enabled > 1) {
		mutex_lock(&adapter->poll_mutex);
		if (adapter->poll_required) {
			adapter->poll_required = 0;
			if (adapter->phydev) {
				port_id = edma_get_port_from_phydev(
						adapter->phydev);

				/* We check if this phydev is in use by
				 * other Groups and stop phy machine only
				 * if that is NOT the case
				 */
				if (!edma_is_port_used(port_id)) {
					if (phy_dev_state[port_id]) {
						phy_stop_machine(
							adapter->phydev);
						phy_dev_state[port_id] = 0;
					}
				}

				adapter->phydev = NULL;
			}

			/* Start the tx queues for this netdev
			 * with link detection disabled
			 */
			if (adapter->link_state == __EDMA_LINKDOWN) {
				adapter->link_state = __EDMA_LINKUP;
				netif_carrier_on(ndev);
				netif_tx_start_all_queues(ndev);
			}
		}
		mutex_unlock(&adapter->poll_mutex);

		goto set_bitmap;
	}

	mutex_lock(&adapter->poll_mutex);
	adapter->poll_required = adapter->poll_required_dynamic;
	mutex_unlock(&adapter->poll_mutex);

	if (!adapter->poll_required)
		goto set_bitmap;

	phydev = adapter->phydev;

	/* If this group has only one port,
	 * if phydev exists we start the phy state machine
	 * and if it doesn't we create a phydev and start it.
	 */
	if (edma_phydev[port_bit]) {
		adapter->phydev = edma_phydev[port_bit];

		/* If the Phy device has changed group,
		 * we need to reassign the netdev
		 */
		if (adapter->phydev->attached_dev != ndev)
			adapter->phydev->attached_dev = ndev;

		if (!phy_dev_state[port_bit]) {
			phy_start_machine(adapter->phydev);
			phy_dev_state[port_bit] = 1;
			set_bit(port_bit,
				(unsigned long *)&edma_link_detect_bmp);
		}
	} else {
		snprintf(adapter->phy_id,
			 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
			 miibus_gb->id,
			 port_bit);

		adapter->phydev = phy_connect(ndev,
					      (const char *)adapter->phy_id,
					      &edma_adjust_link,
					      PHY_INTERFACE_MODE_SGMII);

		if (IS_ERR(adapter->phydev)) {
			adapter->phydev = phydev;
			pr_err("PHY attach FAIL for port %d", port_bit);
			return -1;
		}

		if (adapter->phydev->attached_dev != ndev)
			adapter->phydev->attached_dev = ndev;

		edma_phydev[port_bit] = adapter->phydev;
		phy_dev_state[port_bit] = 1;
		set_bit(port_bit, (unsigned long *)&edma_link_detect_bmp);
		adapter->phydev->advertising |=
					(ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause);
		adapter->phydev->supported |=
					(SUPPORTED_Pause |
					 SUPPORTED_Asym_Pause);
		phy_start(adapter->phydev);
		phy_start_aneg(adapter->phydev);
	}

	/* We check if this phydev is in use by other Groups
	 * and stop phy machine only if that is NOT the case
	 */
	if (phydev) {
		port_id = edma_get_port_from_phydev(phydev);
		if (!edma_is_port_used(port_id)) {
			if (phy_dev_state[port_id]) {
				phy_stop_machine(phydev);
				phy_dev_state[port_id] = 0;
			}
		}
	}

	mutex_lock(&adapter->poll_mutex);
	adapter->poll_required = 1;
	mutex_unlock(&adapter->poll_mutex);
	adapter->link_state = __EDMA_LINKDOWN;

set_bitmap:
	while (portid_bmp) {
		int port_bit_set = ffs(portid_bmp);

		edma_cinfo->portid_netdev_lookup_tbl[port_bit_set] = ndev;
		portid_bmp &= ~(1 << (port_bit_set - 1));
	}

	adapter->dp_bitmap = edma_default_group2_bmp;

	return 0;
}

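/* edma_disable_rss_func()
 * sysctl handler to disable or re-enable receive side scaling in hardware
 */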
static int edma_disable_rss_func(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	struct edma_hw *hw;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Invalid Netdevice\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);
	edma_cinfo = adapter->edma_cinfo;
	hw = &edma_cinfo->hw;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if ((!write) || (ret))
		return ret;

	switch (edma_disable_rss) {
	case EDMA_RSS_DISABLE:
		hw->rss_type = 0;
		edma_write_reg(EDMA_REG_RSS_TYPE, hw->rss_type);
		break;
	case EDMA_RSS_ENABLE:
		hw->rss_type = EDMA_RSS_TYPE_IPV4TCP |
			       EDMA_RSS_TYPE_IPV6_TCP |
			       EDMA_RSS_TYPE_IPV4_UDP |
			       EDMA_RSS_TYPE_IPV6UDP |
			       EDMA_RSS_TYPE_IPV4 |
			       EDMA_RSS_TYPE_IPV6;
		edma_write_reg(EDMA_REG_RSS_TYPE, hw->rss_type);
		break;
	default:
		pr_err("Invalid input\n");
		ret = -1;
		break;
	}

	return ret;
}

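/* edma_weight_assigned_to_queues()
 * sysctl handler to program the WRR weight of a TX queue. The written
 * value packs the queue id in the low 16 bits and the weight in the
 * high 16 bits, e.g. (1 << 16) | 5 assigns weight 1 to queue 5.
 */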
static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret, queue_id, weight;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write) {
		queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
		if (queue_id < 0 || queue_id > 15) {
			pr_err("queue_id not within desired range\n");
			return -EINVAL;
		}

		weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
		if (weight < 0 || weight > 0xF) {
			pr_err("weight not within desired range\n");
			return -EINVAL;
		}

		data = weight << EDMA_WRR_SHIFT(queue_id);

		reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
		edma_read_reg(reg_addr, &reg_data);
		/* Clear the full 4-bit weight field before merging in
		 * the new weight
		 */
		reg_data &= ~(0xF << EDMA_WRR_SHIFT(queue_id));
		edma_write_reg(reg_addr, data | reg_data);
	}

	return ret;
}

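/* edma_queue_to_virtual_queue_map()
 * sysctl handler to map a TX queue to a virtual queue. The written
 * value packs the queue id in the low 16 bits and the virtual queue
 * id in the high 16 bits, e.g. (3 << 16) | 2 maps queue 2 to virtual
 * queue 3.
 */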
static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos)
{
	int ret, queue_id, virtual_qid;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write) {
		queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
		if (queue_id < 0 || queue_id > 15) {
			pr_err("queue_id not within desired range\n");
			return -EINVAL;
		}

		virtual_qid = edma_queue_to_virtual_q >>
				EDMA_WRR_VID_SCTL_SHIFT;
		if (virtual_qid < 0 || virtual_qid > 7) {
			pr_err("virtual_qid not within desired range\n");
			return -EINVAL;
		}

		data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);

		reg_addr = EDMA_REG_VQ_CTRL0 + ((queue_id & ~0x7) >> 1);
		edma_read_reg(reg_addr, &reg_data);
		reg_data &= ~(0x7 << EDMA_VQ_ID_SHIFT(queue_id));
		edma_write_reg(reg_addr, data | reg_data);
	}

	return ret;
}

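/* edma_disable_queue_stop_func()
 * sysctl handler for edma_disable_queue_stop; the updated value is
 * read directly elsewhere in the driver
 */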
static int edma_disable_queue_stop_func(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	adapter = netdev_priv(edma_netdev[0]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	return ret;
}

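/* edma_table
 * sysctl knobs exported under /proc/sys/net/edma
 */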
static struct ctl_table edma_table[] = {
	{
		.procname = "default_lan_tag",
		.data = &edma_default_ltag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_lan_vlan
	},
	{
		.procname = "default_wan_tag",
		.data = &edma_default_wtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_wan_vlan
	},
	{
		.procname = "weight_assigned_to_queues",
		.data = &edma_weight_assigned_to_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_weight_assigned_to_queues
	},
	{
		.procname = "queue_to_virtual_queue_map",
		.data = &edma_queue_to_virtual_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_queue_to_virtual_queue_map
	},
	{
		.procname = "enable_stp_rstp",
		.data = &edma_enable_rstp,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_enable_stp_rstp
	},
	{
		.procname = "athr_hdr_eth_type",
		.data = &edma_athr_hdr_eth_type,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_ath_hdr_eth_type
	},
	{
		.procname = "default_group1_vlan_tag",
		.data = &edma_default_group1_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group1_vtag
	},
	{
		.procname = "default_group2_vlan_tag",
		.data = &edma_default_group2_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group2_vtag
	},
	{
		.procname = "default_group3_vlan_tag",
		.data = &edma_default_group3_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group3_vtag
	},
	{
		.procname = "default_group4_vlan_tag",
		.data = &edma_default_group4_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group4_vtag
	},
	{
		.procname = "default_group5_vlan_tag",
		.data = &edma_default_group5_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group5_vtag
	},
	{
		.procname = "default_group1_bmp",
		.data = &edma_default_group1_bmp,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group1_bmp
	},
	{
		.procname = "default_group2_bmp",
		.data = &edma_default_group2_bmp,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group2_bmp
	},
	{
		.procname = "edma_disable_rss",
		.data = &edma_disable_rss,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_disable_rss_func
	},
	{
		.procname = "dscp2ac",
		.data = &edma_dscp2ac_tbl,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_dscp2ac_mapping_update
	},
	{
		.procname = "per_prec_stats_enable",
		.data = &edma_per_prec_stats_enable,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_per_prec_stats_enable_handler,
	},
	{
		.procname = "per_prec_stats_reset",
		.data = &edma_prec_stats_reset,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_prec_stats_reset_handler,
	},
	{
		.procname = "iad_stats_enable",
		.data = &edma_iad_stats_enable,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_iad_stats_enable_handler,
	},
	{
		.procname = "iad_stats_reset",
		.data = &edma_iad_stats_reset,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_iad_stats_reset_handler,
	},
	{
		.procname = "iad_print_flow_table",
		.data = &edma_print_flow_table,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_print_flow_table_handler,
	},
	{
		.procname = "max_valid_ifd_usec",
		.data = &edma_max_valid_ifd_usec,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_max_valid_ifd_usec_handler,
	},
	{
		.procname = "edma_disable_queue_stop",
		.data = &edma_disable_queue_stop,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_disable_queue_stop_func
	},
	{}
};

/* edma_axi_netdev_ops
 * Describe the operations supported by registered netdevices
 */
static const struct net_device_ops edma_axi_netdev_ops = {
	.ndo_open = edma_open,
	.ndo_stop = edma_close,
	.ndo_start_xmit = edma_xmit,
	.ndo_set_mac_address = edma_set_mac_addr,
	.ndo_select_queue = edma_select_xps_queue,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = edma_rx_flow_steer,
	.ndo_register_rfs_filter = edma_register_rfs_filter,
	.ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
#endif
	.ndo_get_stats64 = edma_get_stats64,
	.ndo_change_mtu = edma_change_mtu,
};

/* edma_axi_probe()
 * Initialise an adapter identified by a platform_device structure.
 *
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur in the probe.
 */
static int edma_axi_probe(struct platform_device *pdev)
{
	struct edma_common_info *edma_cinfo;
	struct edma_hw *hw;
	struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
	struct resource *res;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *pnp;
	struct device_node *mdio_node = NULL;
	struct platform_device *mdio_plat = NULL;
	struct mii_bus *miibus = NULL;
	struct edma_mdio_data *mdio_data = NULL;
	int i, j, k, err = 0;
	u32 portid_bmp;
	int idx = 0, idx_mac = 0;

	if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
		dev_err(&pdev->dev, "Invalid CPU Cores\n");
		return -EINVAL;
	}

	if ((num_rxq != 4) && (num_rxq != 8)) {
		dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
		return -EINVAL;
	}

	edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
	if (!edma_cinfo) {
		err = -ENOMEM;
		goto err_alloc;
	}

	edma_cinfo->pdev = pdev;

	of_property_read_u32(np, "qcom,num-gmac", &edma_cinfo->num_gmac);
	if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
		pr_err("Invalid DTSI entry for qcom,num-gmac\n");
		err = -EINVAL;
		goto err_cinfo;
	}

	/* Initialize the netdev array before allocation
	 * to avoid double free
	 */
	for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
		edma_netdev[i] = NULL;

	for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
		edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
				EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);

		if (!edma_netdev[i]) {
			dev_err(&pdev->dev,
				"net device alloc fails for index=%d\n", i);
			err = -ENODEV;
			goto err_ioremap;
		}

		SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
		platform_set_drvdata(pdev, edma_netdev[i]);
		edma_cinfo->netdev[i] = edma_netdev[i];
	}

	/* Fill ring details */
	edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
	edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
	edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;

	/* Update num rx queues based on module parameter */
	edma_cinfo->num_rx_queues = num_rxq;
	edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);

	edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;

	hw = &edma_cinfo->hw;

	/* Fill HW defaults */
	hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
	hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;

	edma_cinfo->num_cores = EDMA_CPU_CORES_SUPPORTED;

	if (of_property_read_bool(np, "qcom,num-cores")) {
		of_property_read_u32(np, "qcom,num-cores",
				     &edma_cinfo->num_cores);

		if (!edma_cinfo->num_cores ||
		    edma_cinfo->num_cores > EDMA_CPU_CORES_SUPPORTED)
			edma_cinfo->num_cores = EDMA_CPU_CORES_SUPPORTED;
	}

	of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
	of_property_read_u32(np, "qcom,rx-head-buf-size",
			     &hw->rx_head_buff_size);

	if (overwrite_mode) {
		dev_info(&pdev->dev, "page mode overwritten");
		edma_cinfo->page_mode = page_mode;
	}

	if (jumbo_mru)
		edma_cinfo->fraglist_mode = 1;

	if (edma_cinfo->page_mode)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
	else if (edma_cinfo->fraglist_mode)
		hw->rx_head_buff_size = jumbo_mru;
	else if (!hw->rx_head_buff_size)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;

	hw->misc_intr_mask = 0;
	hw->wol_intr_mask = 0;

	hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
	hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;

	/* configure RSS type to the different protocol that can be
	 * supported
	 */
	hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
		EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
		EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(edma_cinfo->hw.hw_addr)) {
		err = PTR_ERR(edma_cinfo->hw.hw_addr);
		goto err_ioremap;
	}

	edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;

	/* Parse tx queue interrupt number from device tree */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);

	/* Parse rx queue interrupt number from device tree.
	 * j starts where TX interrupt parsing left off (i.e. 16) and the
	 * loop runs from 0 to 7 to parse the RX interrupt numbers.
	 */
	for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
	     i < edma_cinfo->num_rx_queues; i++) {
		edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
		k += ((num_rxq == 4) ? 2 : 1);
		j += ((num_rxq == 4) ? 2 : 1);
	}

	edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
	edma_cinfo->rx_page_buffer_len = PAGE_SIZE;

	err = edma_alloc_queues_tx(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of TX queue failed\n");
		goto err_tx_qinit;
	}

	err = edma_alloc_queues_rx(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of RX queue failed\n");
		goto err_rx_qinit;
	}

	err = edma_alloc_tx_rings(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of TX resources failed\n");
		goto err_tx_rinit;
	}

	err = edma_alloc_rx_rings(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of RX resources failed\n");
		goto err_rx_rinit;
	}

	/* Initialize netdev and netdev bitmap for transmit descriptor rings */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];
		int j;

		etdr->netdev_bmp = 0;
		for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
			etdr->netdev[j] = NULL;
			etdr->nq[j] = NULL;
		}
	}

	if (of_property_read_bool(np, "qcom,mdio-supported")) {
		mdio_node = of_find_compatible_node(NULL, NULL,
						    "qcom,ipq40xx-mdio");
		if (!mdio_node) {
			dev_err(&pdev->dev, "cannot find mdio node by phandle");
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		mdio_plat = of_find_device_by_node(mdio_node);
		if (!mdio_plat) {
			dev_err(&pdev->dev,
				"cannot find platform device from mdio node");
			of_node_put(mdio_node);
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		mdio_data = dev_get_drvdata(&mdio_plat->dev);
		if (!mdio_data) {
			dev_err(&pdev->dev,
				"cannot get mii bus reference from device data");
			of_node_put(mdio_node);
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		miibus = mdio_data->mii_bus;
		miibus_gb = mdio_data->mii_bus;
	}

	for_each_available_child_of_node(np, pnp) {
		const char *mac_addr;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx_mac == edma_cinfo->num_gmac) {
			of_node_put(np);
			break;
		}

		mac_addr = of_get_mac_address(pnp);
		if (mac_addr)
			memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);

		idx_mac++;
	}

	/* Populate the adapter structure and register the netdevice */
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		int k, m;

		adapter[i] = netdev_priv(edma_netdev[i]);
		adapter[i]->netdev = edma_netdev[i];
		adapter[i]->pdev = pdev;
		mutex_init(&adapter[i]->poll_mutex);
		for (j = 0; j < CONFIG_NR_CPUS; j++) {
			m = i % 2;
			adapter[i]->tx_start_offset[j] =
				((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
			/* Share the queues with available net-devices.
			 * For instance, with 5 net-devices
			 * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
			 * and eth1/eth3 will get the remaining.
			 */
			for (k = adapter[i]->tx_start_offset[j]; k <
			     (adapter[i]->tx_start_offset[j] + 2); k++) {
				if (edma_fill_netdev(edma_cinfo, k, i, j)) {
					pr_err("Netdev overflow Error\n");
					goto err_register;
				}
			}
		}

		adapter[i]->edma_cinfo = edma_cinfo;
		edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
		edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
				| NETIF_F_HW_VLAN_CTAG_TX
				| NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
		edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX
				| NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;
		edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;
		edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;

#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->hw_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
#endif
		if (edma_cinfo->fraglist_mode) {
			edma_netdev[i]->features |= NETIF_F_FRAGLIST;
			edma_netdev[i]->hw_features |= NETIF_F_FRAGLIST;
			edma_netdev[i]->vlan_features |= NETIF_F_FRAGLIST;
			edma_netdev[i]->wanted_features |= NETIF_F_FRAGLIST;
		}

		edma_set_ethtool_ops(edma_netdev[i]);

		/* Assign a random MAC address if a valid one was not
		 * provided in the device tree
		 */
		if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
			random_ether_addr(edma_netdev[i]->dev_addr);
			pr_info("EDMA using MAC@ - using");
			pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
				*(edma_netdev[i]->dev_addr),
				*(edma_netdev[i]->dev_addr + 1),
				*(edma_netdev[i]->dev_addr + 2),
				*(edma_netdev[i]->dev_addr + 3),
				*(edma_netdev[i]->dev_addr + 4),
				*(edma_netdev[i]->dev_addr + 5));
		}

		err = register_netdev(edma_netdev[i]);
		if (err)
			goto err_register;

		/* carrier off reporting is important to
		 * ethtool even BEFORE open
		 */
		netif_carrier_off(edma_netdev[i]);

		/* Allocate reverse irq cpu mapping structure for
		 * receive queues
		 */
#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->rx_cpu_rmap =
			alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
		if (!edma_netdev[i]->rx_cpu_rmap) {
			err = -ENOMEM;
			goto err_rmap_alloc_fail;
		}
#endif
	}

	for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
		edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;

	for_each_available_child_of_node(np, pnp) {
		const uint32_t *vlan_tag = NULL;
		int len;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx == edma_cinfo->num_gmac)
			break;

		/* Populate port-id to netdev lookup table */
		vlan_tag = of_get_property(pnp, "vlan-tag", &len);
		if (!vlan_tag) {
			pr_err("Vlan tag parsing Failed.\n");
			goto err_rmap_alloc_fail;
		}

		adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
		vlan_tag++;
		portid_bmp = of_read_number(vlan_tag, 1);
		adapter[idx]->dp_bitmap = portid_bmp;

		portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
		while (portid_bmp) {
			int port_bit = ffs(portid_bmp);

			if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
				goto err_rmap_alloc_fail;
			edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
				edma_netdev[idx];
			portid_bmp &= ~(1 << (port_bit - 1));
		}

		if (of_property_read_u32(pnp, "qcom,poll-required-dynamic",
					 &adapter[idx]->poll_required_dynamic))
			adapter[idx]->poll_required_dynamic = 0;

		if (!of_property_read_u32(pnp, "qcom,poll-required",
					  &adapter[idx]->poll_required)) {
			if (adapter[idx]->poll_required) {
				of_property_read_u32(pnp, "qcom,phy-mdio-addr",
						     &adapter[idx]->phy_mdio_addr);
				of_property_read_u32(pnp, "qcom,forced-speed",
						     &adapter[idx]->forced_speed);
				of_property_read_u32(pnp, "qcom,forced-duplex",
						     &adapter[idx]->forced_duplex);

				/* create a phyid using MDIO bus id
				 * and MDIO bus address
				 */
				snprintf(adapter[idx]->phy_id,
					 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
					 miibus->id,
					 adapter[idx]->phy_mdio_addr);
			}
		} else {
			adapter[idx]->poll_required = 0;
			adapter[idx]->forced_speed = SPEED_1000;
			adapter[idx]->forced_duplex = DUPLEX_FULL;
		}

		idx++;
	}

	edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
							     "net/edma",
							     edma_table);
	if (!edma_cinfo->edma_ctl_table_hdr) {
		dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
		goto err_unregister_sysctl_tbl;
	}

	/* Disable all 16 TX and 8 RX IRQs */
	edma_irq_disable(edma_cinfo);

	err = edma_reset(edma_cinfo);
	if (err) {
		err = -EIO;
		goto err_reset;
	}

	/* Populate per_core_info, add the NAPI contexts, request the
	 * 16 TX and 8 RX IRQs, and enable NAPI
	 */
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		u8 rx_start;

		edma_cinfo->edma_percpu_info[i].napi.state = 0;

		netif_napi_add(edma_netdev[0],
			       &edma_cinfo->edma_percpu_info[i].napi,
			       edma_poll, 64);
		napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
		edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
		edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
				<< (i << EDMA_RX_PER_CPU_MASK_SHIFT);
		edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
		edma_cinfo->edma_percpu_info[i].rx_start =
			i << EDMA_RX_CPU_START_SHIFT;
		rx_start = i << EDMA_RX_CPU_START_SHIFT;
		edma_cinfo->edma_percpu_info[i].tx_status = 0;
		edma_cinfo->edma_percpu_info[i].rx_status = 0;
		edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;

		/* Request irq per core */
		for (j = edma_cinfo->edma_percpu_info[i].tx_start;
		     j < tx_start[i] + 4; j++) {
			snprintf(&edma_tx_irq[j][0], sizeof(edma_tx_irq[0]), "edma_eth_tx%d", j);
			err = request_irq(edma_cinfo->tx_irq[j],
					  edma_interrupt,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
					  IRQF_DISABLED,
#else
					  0,
#endif
					  &edma_tx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
			if (err)
				goto err_reset;
		}

		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < (rx_start +
		     ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
		     j++) {
			snprintf(&edma_rx_irq[j][0], sizeof(edma_rx_irq[0]), "edma_eth_rx%d", j);
			err = request_irq(edma_cinfo->rx_irq[j],
					  edma_interrupt,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
					  IRQF_DISABLED,
#else
					  0,
#endif
					  &edma_rx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
			if (err)
				goto err_reset;
		}

#ifdef CONFIG_RFS_ACCEL
		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < rx_start + 2; j += 2) {
			err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
					       edma_cinfo->rx_irq[j]);
			if (err)
				goto err_rmap_add_fail;
		}
#endif
	}

	/* Used to clear interrupt status, allocate rx buffer,
	 * configure edma descriptors registers
	 */
	err = edma_configure(edma_cinfo);
	if (err) {
		err = -EIO;
		goto err_configure;
	}

	/* Configure the RSS indirection table, which maps 128 hash values
	 * to EDMA HW RX queues. The 128 entries are configured in the
	 * following pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
	 * and so on.
	 */
	for (i = 0; i < EDMA_NUM_IDT; i++)
		edma_write_reg(EDMA_REG_RSS_IDT(i),
			       edma_idt_tbl[edma_cinfo->num_cores - 1][i]);

	/* Configure load balance mapping table.
	 * Four table entries will be configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 * respectively.
	 */
	edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);

	/* Configure Virtual queue for Tx rings
	 * User can also change this value runtime through
	 * a sysctl
	 */
	edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
	edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);

	/* Configure Max AXI Burst write size to 128 bytes */
	edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
		       EDMA_AXIW_MAXWRSIZE_VALUE);

	/* Enable all 16 TX and 8 RX IRQ masks */
	edma_irq_enable(edma_cinfo);
	edma_enable_tx_ctrl(&edma_cinfo->hw);
	edma_enable_rx_ctrl(&edma_cinfo->hw);

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		u32 port_id;

		if (!(adapter[i]->poll_required)) {
			adapter[i]->phydev = NULL;
		} else {
			adapter[i]->phydev =
				phy_connect(edma_netdev[i],
					    (const char *)adapter[i]->phy_id,
					    &edma_adjust_link,
					    PHY_INTERFACE_MODE_SGMII);
			if (IS_ERR(adapter[i]->phydev)) {
				dev_dbg(&pdev->dev, "PHY attach FAIL");
				err = -EIO;
				goto edma_phy_attach_fail;
			} else {
				adapter[i]->phydev->advertising |=
					ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
				adapter[i]->phydev->supported |=
					SUPPORTED_Pause |
					SUPPORTED_Asym_Pause;
				portid_bmp = adapter[i]->dp_bitmap >> 1;
				port_id = ffs(portid_bmp);
				edma_phydev[port_id - 1] = adapter[i]->phydev;
				phy_dev_state[port_id - 1] = 1;
			}
		}
	}

	spin_lock_init(&edma_cinfo->stats_lock);

	init_timer(&edma_stats_timer);
	edma_stats_timer.expires = jiffies + 1*HZ;
	edma_stats_timer.data = (unsigned long)edma_cinfo;
	edma_stats_timer.function = edma_statistics_timer; /* timer handler */
	add_timer(&edma_stats_timer);

	/*
	 * Initialize dscp2ac mapping table
	 */
	for (i = 0 ; i < EDMA_PRECEDENCE_MAX ; i++)
		edma_dscp2ac_tbl[i] = EDMA_AC_BE;

	memset(edma_flow_tbl, 0, sizeof(struct edma_flow_attrib) * EDMA_MAX_IAD_FLOW_STATS_SUPPORTED);

	return 0;

edma_phy_attach_fail:
	miibus = NULL;
err_configure:
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
		adapter[i]->netdev->rx_cpu_rmap = NULL;
	}
#endif
err_rmap_add_fail:
	edma_free_irqs(adapter[0]);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
err_reset:
err_unregister_sysctl_tbl:
err_rmap_alloc_fail:
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);
err_register:
err_mdiobus_init_fail:
	edma_free_rx_rings(edma_cinfo);
err_rx_rinit:
	edma_free_tx_rings(edma_cinfo);
err_tx_rinit:
	edma_free_queues(edma_cinfo);
err_rx_qinit:
err_tx_qinit:
	iounmap(edma_cinfo->hw.hw_addr);
err_ioremap:
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		if (edma_netdev[i])
			free_netdev(edma_netdev[i]);
	}
err_cinfo:
	kfree(edma_cinfo);
err_alloc:
	return err;
}

/* edma_axi_remove()
 * Device Removal Routine
 *
 * edma_axi_remove is called by the platform subsystem to alert the driver
 * that it should release a platform device.
 */
static int edma_axi_remove(struct platform_device *pdev)
{
	struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	struct edma_hw *hw = &edma_cinfo->hw;
	int i;

	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

	edma_stop_rx_tx(hw);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

	edma_irq_disable(edma_cinfo);
	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
#ifdef CONFIG_RFS_ACCEL
	/* Free the per-netdev rmap, not edma_netdev[0]'s on every pass */
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
		edma_netdev[i]->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < EDMA_MAX_PORTID_SUPPORTED; i++) {
		if (edma_phydev[i])
			phy_disconnect(edma_phydev[i]);
	}

	del_timer_sync(&edma_stats_timer);
	edma_free_irqs(adapter);
	unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
	edma_free_tx_resources(edma_cinfo);
	edma_free_rx_resources(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_rx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		free_netdev(edma_netdev[i]);

	kfree(edma_cinfo);

	return 0;
}

static const struct of_device_id edma_of_mtable[] = {
	{.compatible = "qcom,ess-edma" },
	{}
};
MODULE_DEVICE_TABLE(of, edma_of_mtable);

static struct platform_driver edma_axi_driver = {
	.driver = {
		.name = edma_axi_driver_name,
		.of_match_table = edma_of_mtable,
	},
	.probe = edma_axi_probe,
	.remove = edma_axi_remove,
};

module_platform_driver(edma_axi_driver);

MODULE_DESCRIPTION("QCA ESS EDMA driver");
MODULE_LICENSE("Dual BSD/GPL");