/*
 * Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/cpu_rmap.h>
#include <linux/of_net.h>
#include <linux/timer.h>
#include <linux/bitmap.h>
#include "edma.h"
#include "ess_edma.h"

/* Weight round robin and virtual QID mask */
#define EDMA_WRR_VID_SCTL_MASK 0xffff

/* Weight round robin and virtual QID shift */
#define EDMA_WRR_VID_SCTL_SHIFT 16
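
/* The weight-assignment and queue-to-virtual-queue sysctls below pack two
 * fields into one integer using this mask/shift pair: the low 16 bits carry
 * the hardware queue id and the high 16 bits carry the weight (or the
 * virtual queue id). Illustrative encoding of a value written from user
 * space:
 *
 *	value = (weight << EDMA_WRR_VID_SCTL_SHIFT) |
 *		(queue_id & EDMA_WRR_VID_SCTL_MASK);
 */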

char edma_axi_driver_name[] = "ess_edma";
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

static u32 edma_hw_addr;

struct timer_list edma_stats_timer;
static struct mii_bus *miibus_gb;

char edma_tx_irq[16][64];
char edma_rx_irq[8][64];
struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
static struct phy_device *edma_phydev[EDMA_MAX_PORTID_SUPPORTED];
static int edma_link_detect_bmp;
static int phy_dev_state[EDMA_MAX_PORTID_SUPPORTED];
static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
			EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
			EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};

static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;

static u32 edma_default_group1_bmp __read_mostly = EDMA_DEFAULT_GROUP1_BMP;
static u32 edma_default_group2_bmp __read_mostly = EDMA_DEFAULT_GROUP2_BMP;
static u32 edma_disable_rss __read_mostly = EDMA_DEFAULT_DISABLE_RSS;

static int edma_weight_assigned_to_q __read_mostly;
static int edma_queue_to_virtual_q __read_mostly;
static bool edma_enable_rstp __read_mostly;
static int edma_athr_hdr_eth_type __read_mostly;

static int page_mode;
module_param(page_mode, int, 0);
MODULE_PARM_DESC(page_mode, "enable page mode");

static int overwrite_mode;
module_param(overwrite_mode, int, 0);
MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");

static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
module_param(jumbo_mru, int, 0);
MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");

static int num_rxq = 4;
module_param(num_rxq, int, 0);
MODULE_PARM_DESC(num_rxq, "change the number of rx queues");

void edma_write_reg(u16 reg_addr, u32 reg_value)
{
	writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
}

void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
}
/* edma_change_tx_coalesce()
 * change tx interrupt moderation timer
 */
void edma_change_tx_coalesce(int usecs)
{
	u32 reg_value;

	/* Right-shift the value from the user by 1 because the IMT
	 * resolution is 2 usecs: one count of this register corresponds
	 * to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}
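
/* Worked example (illustrative): a request of usecs = 100 programs
 * 100 >> 1 = 50 counts, i.e. 50 * 2 usecs = 100 usecs of tx moderation.
 * Odd values lose their low bit: usecs = 101 also programs 50 counts.
 */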

/* edma_change_rx_coalesce()
 * change rx interrupt moderation timer
 */
void edma_change_rx_coalesce(int usecs)
{
	u32 reg_value;

	/* Right-shift the value from the user by 1 because the IMT
	 * resolution is 2 usecs: one count of this register corresponds
	 * to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}

/* edma_get_tx_rx_coalesce()
 * Get tx/rx interrupt moderation value
 */
void edma_get_tx_rx_coalesce(u32 *reg_val)
{
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
}

void edma_read_append_stats(struct edma_common_info *edma_cinfo)
{
	u32 *p;
	int i;
	u32 stat;

	spin_lock(&edma_cinfo->stats_lock);
	p = (u32 *)&(edma_cinfo->edma_ethstats);

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	spin_unlock(&edma_cinfo->stats_lock);
}
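
/* Note: the accumulate-on-read pattern above suggests the per-queue
 * hardware counters are clear-on-read (an assumption based on this code,
 * not on a datasheet), so each poll adds the delta since the previous
 * poll; the one-second edma_stats_timer below keeps the totals current.
 */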

static void edma_statistics_timer(unsigned long data)
{
	struct edma_common_info *edma_cinfo = (struct edma_common_info *)data;

	edma_read_append_stats(edma_cinfo);

	mod_timer(&edma_stats_timer, jiffies + 1*HZ);
}

static int edma_enable_stp_rstp(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_set_stp_rstp(edma_enable_rstp);

	return ret;
}

static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);

	return ret;
}

static int edma_get_port_from_phydev(struct phy_device *phydev)
{
	int i;

	for (i = 0; i < EDMA_MAX_PORTID_SUPPORTED; i++) {
		if (phydev == edma_phydev[i])
			return i;
	}

	pr_err("Invalid PHY device\n");
	return -1;
}

static int edma_is_port_used(int portid)
{
	int used_portid_bmp;

	used_portid_bmp = edma_link_detect_bmp >> 1;

	while (used_portid_bmp) {
		int port_bit_set = ffs(used_portid_bmp);

		if (port_bit_set == portid)
			return 1;
		used_portid_bmp &= ~(1 << (port_bit_set - 1));
	}

	return 0;
}

static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for default_lan does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[1]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_ltag;

	return ret;
}

static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for default_wan does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_wtag;

	return ret;
}

static int edma_change_group1_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for Group 1 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group1_vtag;

	return ret;
}

static int edma_change_group2_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for Group 2 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[1]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group2_vtag;

	return ret;
}

static int edma_change_group3_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[2]) {
		pr_err("Netdevice for Group 3 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[2]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group3_vtag;

	return ret;
}

static int edma_change_group4_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[3]) {
		pr_err("Netdevice for Group 4 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[3]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group4_vtag;

	return ret;
}

static int edma_change_group5_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[4]) {
		pr_err("Netdevice for Group 5 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[4]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group5_vtag;

	return ret;
}

static int edma_change_group1_bmp(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	struct net_device *ndev;
	struct phy_device *phydev;
	int ret, num_ports_enabled;
	u32 portid_bmp, port_bit, prev_bmp, port_id;

	ndev = edma_netdev[0];
	if (!ndev) {
		pr_err("Netdevice for Group 1 does not exist\n");
		return -1;
	}

	prev_bmp = edma_default_group1_bmp;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if ((!write) || (prev_bmp == edma_default_group1_bmp))
		return ret;

	adapter = netdev_priv(ndev);
	edma_cinfo = adapter->edma_cinfo;

	/* We ignore the bit for the CPU Port */
	portid_bmp = edma_default_group1_bmp >> 1;
	port_bit = ffs(portid_bmp);
	if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
		return -1;

	/* If this group has no ports,
	 * we disable polling for the adapter, stop the queues and return
	 */
	if (!port_bit) {
		adapter->dp_bitmap = edma_default_group1_bmp;
		if (adapter->poll_required) {
			adapter->poll_required = 0;
			adapter->link_state = __EDMA_LINKDOWN;
			netif_carrier_off(ndev);
			netif_tx_stop_all_queues(ndev);
		}
		return 0;
	}

	/* Our array indexes are for 5 ports (0 - 4) */
	port_bit--;
	edma_link_detect_bmp = 0;

	/* Do we have more ports in this group? */
	num_ports_enabled = bitmap_weight((unsigned long *)&portid_bmp, 32);

	/* If this group has more than one port,
	 * we disable polling for the adapter as link detection
	 * should be disabled, stop the phy state machine of the previous
	 * phy adapter attached to the group and start the queues
	 */
	if (num_ports_enabled > 1) {
		if (adapter->poll_required) {
			adapter->poll_required = 0;
			if (adapter->phydev) {
				port_id = edma_get_port_from_phydev(
						adapter->phydev);

				/* We check if the phydev attached to this
				 * group is already started and if yes, we
				 * stop the state machine for the phy
				 */
				if (phy_dev_state[port_id]) {
					phy_stop_machine(adapter->phydev);
					phy_dev_state[port_id] = 0;
				}

				adapter->phydev = NULL;
			}

			/* Start the tx queues for this netdev
			 * with link detection disabled
			 */
			if (adapter->link_state == __EDMA_LINKDOWN) {
				adapter->link_state = __EDMA_LINKUP;
				netif_tx_start_all_queues(ndev);
				netif_carrier_on(ndev);
			}
		}
		goto set_bitmap;
	}

	adapter->poll_required = adapter->poll_required_dynamic;

	if (!adapter->poll_required)
		goto set_bitmap;

	phydev = adapter->phydev;

	/* If this group has only one port:
	 * if a phydev exists we start the phy state machine,
	 * and if it doesn't we create a phydev and start it.
	 */
	if (edma_phydev[port_bit]) {
		adapter->phydev = edma_phydev[port_bit];
		set_bit(port_bit, (unsigned long *)&edma_link_detect_bmp);

		/* If the Phy device has changed group,
		 * we need to reassign the netdev
		 */
		if (adapter->phydev->attached_dev != ndev)
			adapter->phydev->attached_dev = ndev;

		if (!phy_dev_state[port_bit]) {
			phy_start_machine(adapter->phydev);
			phy_dev_state[port_bit] = 1;
		}
	} else {
		snprintf(adapter->phy_id,
			 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
			 miibus_gb->id,
			 port_bit);

		adapter->phydev = phy_connect(ndev,
					      (const char *)adapter->phy_id,
					      &edma_adjust_link,
					      PHY_INTERFACE_MODE_SGMII);

		if (IS_ERR(adapter->phydev)) {
			adapter->phydev = phydev;
			pr_err("PHY attach FAIL for port %d\n", port_bit);
			return -1;
		}

		if (adapter->phydev->attached_dev != ndev)
			adapter->phydev->attached_dev = ndev;

		edma_phydev[port_bit] = adapter->phydev;
		phy_dev_state[port_bit] = 1;
		set_bit(port_bit, (unsigned long *)&edma_link_detect_bmp);
		adapter->phydev->advertising |=
			(ADVERTISED_Pause |
			 ADVERTISED_Asym_Pause);
		adapter->phydev->supported |=
			(SUPPORTED_Pause |
			 SUPPORTED_Asym_Pause);
		phy_start(adapter->phydev);
		phy_start_aneg(adapter->phydev);
	}

	/* We check if this phydev is in use by other Groups
	 * and stop the phy machine only if it is not stopped
	 */
	if (phydev) {
		port_id = edma_get_port_from_phydev(phydev);
		if (phy_dev_state[port_id]) {
			phy_stop_machine(phydev);
			phy_dev_state[port_id] = 0;
		}
	}

	adapter->poll_required = 1;
	adapter->link_state = __EDMA_LINKDOWN;

set_bitmap:
	while (portid_bmp) {
		int port_bit_set = ffs(portid_bmp);

		edma_cinfo->portid_netdev_lookup_tbl[port_bit_set] = ndev;
		portid_bmp &= ~(1 << (port_bit_set - 1));
	}

	adapter->dp_bitmap = edma_default_group1_bmp;

	return 0;
}

static int edma_change_group2_bmp(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	struct net_device *ndev;
	struct phy_device *phydev;
	int ret;
	u32 prev_bmp, portid_bmp, port_bit, num_ports_enabled, port_id;

	ndev = edma_netdev[1];
	if (!ndev) {
		pr_err("Netdevice for Group 2 does not exist\n");
		return -1;
	}

	prev_bmp = edma_default_group2_bmp;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if ((!write) || (prev_bmp == edma_default_group2_bmp))
		return ret;

	adapter = netdev_priv(ndev);
	edma_cinfo = adapter->edma_cinfo;

	/* We ignore the bit for the CPU Port */
	portid_bmp = edma_default_group2_bmp >> 1;
	port_bit = ffs(portid_bmp);
	if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
		return -1;

	/* If this group has no ports,
	 * we disable polling for the adapter, stop the queues and return
	 */
	if (!port_bit) {
		adapter->dp_bitmap = edma_default_group2_bmp;
		if (adapter->poll_required) {
			adapter->poll_required = 0;
			adapter->link_state = __EDMA_LINKDOWN;
			netif_carrier_off(ndev);
			netif_tx_stop_all_queues(ndev);
		}
		return 0;
	}

	/* Our array indexes are for 5 ports (0 - 4) */
	port_bit--;

	/* Do we have more ports in this group? */
	num_ports_enabled = bitmap_weight((unsigned long *)&portid_bmp, 32);

	/* If this group has more than one port,
	 * we disable polling for the adapter as link detection
	 * should be disabled, stop the phy state machine of the previous
	 * phy adapter attached to the group and start the queues
	 */
	if (num_ports_enabled > 1) {
		if (adapter->poll_required) {
			adapter->poll_required = 0;
			if (adapter->phydev) {
				port_id = edma_get_port_from_phydev(
						adapter->phydev);

				/* We check if this phydev is in use by
				 * other Groups and stop the phy machine
				 * only if that is NOT the case
				 */
				if (!edma_is_port_used(port_id)) {
					if (phy_dev_state[port_id]) {
						phy_stop_machine(
							adapter->phydev);
						phy_dev_state[port_id] = 0;
					}
				}

				adapter->phydev = NULL;
			}

			/* Start the tx queues for this netdev
			 * with link detection disabled
			 */
			if (adapter->link_state == __EDMA_LINKDOWN) {
				adapter->link_state = __EDMA_LINKUP;
				netif_carrier_on(ndev);
				netif_tx_start_all_queues(ndev);
			}
		}
		goto set_bitmap;
	}

	adapter->poll_required = adapter->poll_required_dynamic;

	if (!adapter->poll_required)
		goto set_bitmap;

	phydev = adapter->phydev;

	/* If this group has only one port:
	 * if a phydev exists we start the phy state machine,
	 * and if it doesn't we create a phydev and start it.
	 */
	if (edma_phydev[port_bit]) {
		adapter->phydev = edma_phydev[port_bit];

		/* If the Phy device has changed group,
		 * we need to reassign the netdev
		 */
		if (adapter->phydev->attached_dev != ndev)
			adapter->phydev->attached_dev = ndev;

		if (!phy_dev_state[port_bit]) {
			phy_start_machine(adapter->phydev);
			phy_dev_state[port_bit] = 1;
			set_bit(port_bit,
				(unsigned long *)&edma_link_detect_bmp);
		}
	} else {
		snprintf(adapter->phy_id,
			 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
			 miibus_gb->id,
			 port_bit);

		adapter->phydev = phy_connect(ndev,
					      (const char *)adapter->phy_id,
					      &edma_adjust_link,
					      PHY_INTERFACE_MODE_SGMII);

		if (IS_ERR(adapter->phydev)) {
			adapter->phydev = phydev;
			pr_err("PHY attach FAIL for port %d\n", port_bit);
			return -1;
		}

		if (adapter->phydev->attached_dev != ndev)
			adapter->phydev->attached_dev = ndev;

		edma_phydev[port_bit] = adapter->phydev;
		phy_dev_state[port_bit] = 1;
		set_bit(port_bit, (unsigned long *)&edma_link_detect_bmp);
		adapter->phydev->advertising |=
			(ADVERTISED_Pause |
			 ADVERTISED_Asym_Pause);
		adapter->phydev->supported |=
			(SUPPORTED_Pause |
			 SUPPORTED_Asym_Pause);
		phy_start(adapter->phydev);
		phy_start_aneg(adapter->phydev);
	}

	/* We check if this phydev is in use by other Groups
	 * and stop the phy machine only if that is NOT the case
	 */
	if (phydev) {
		port_id = edma_get_port_from_phydev(phydev);
		if (!edma_is_port_used(port_id)) {
			if (phy_dev_state[port_id]) {
				phy_stop_machine(phydev);
				phy_dev_state[port_id] = 0;
			}
		}
	}

	adapter->poll_required = 1;
	adapter->link_state = __EDMA_LINKDOWN;

set_bitmap:
	while (portid_bmp) {
		int port_bit_set = ffs(portid_bmp);

		edma_cinfo->portid_netdev_lookup_tbl[port_bit_set] = ndev;
		portid_bmp &= ~(1 << (port_bit_set - 1));
	}

	adapter->dp_bitmap = edma_default_group2_bmp;

	return 0;
}

static int edma_disable_rss_func(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	struct edma_hw *hw;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Invalid Netdevice\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);
	edma_cinfo = adapter->edma_cinfo;
	hw = &edma_cinfo->hw;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if ((!write) || (ret))
		return ret;

	switch (edma_disable_rss) {
	case EDMA_RSS_DISABLE:
		hw->rss_type = 0;
		edma_write_reg(EDMA_REG_RSS_TYPE, hw->rss_type);
		break;
	case EDMA_RSS_ENABLE:
		hw->rss_type = EDMA_RSS_TYPE_IPV4TCP |
			EDMA_RSS_TYPE_IPV6_TCP |
			EDMA_RSS_TYPE_IPV4_UDP |
			EDMA_RSS_TYPE_IPV6UDP |
			EDMA_RSS_TYPE_IPV4 |
			EDMA_RSS_TYPE_IPV6;
		edma_write_reg(EDMA_REG_RSS_TYPE, hw->rss_type);
		break;
	default:
		pr_err("Invalid input\n");
		ret = -1;
		break;
	}

	return ret;
}

static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret, queue_id, weight;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write) {
		queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
		if (queue_id < 0 || queue_id > 15) {
			pr_err("queue_id not within desired range\n");
			return -EINVAL;
		}

		weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
		if (weight < 0 || weight > 0xF) {
			pr_err("weight not within desired range\n");
			return -EINVAL;
		}

		data = weight << EDMA_WRR_SHIFT(queue_id);

		reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
		edma_read_reg(reg_addr, &reg_data);
		/* Clear the full 4-bit weight field for this queue
		 * before programming the new weight
		 */
		reg_data &= ~(0xF << EDMA_WRR_SHIFT(queue_id));
		edma_write_reg(reg_addr, data | reg_data);
	}

	return ret;
}
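
/* Example usage (illustrative): with the sysctl root registered below at
 * net/edma, giving queue 3 a WRR weight of 4 means writing
 * (4 << 16) | 3 = 0x40003:
 *
 *	echo 262147 > /proc/sys/net/edma/weight_assigned_to_queues
 */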

static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos)
{
	int ret, queue_id, virtual_qid;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write) {
		queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
		if (queue_id < 0 || queue_id > 15) {
			pr_err("queue_id not within desired range\n");
			return -EINVAL;
		}

		virtual_qid = edma_queue_to_virtual_q >>
			EDMA_WRR_VID_SCTL_SHIFT;
		if (virtual_qid < 0 || virtual_qid > 8) {
			pr_err("virtual queue id not within desired range\n");
			return -EINVAL;
		}

		data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);

		reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
		edma_read_reg(reg_addr, &reg_data);
		/* Clear the 4-bit virtual-queue id field for this queue
		 * before programming the new mapping
		 */
		reg_data &= ~(0xF << EDMA_VQ_ID_SHIFT(queue_id));
		edma_write_reg(reg_addr, data | reg_data);
	}

	return ret;
}
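
/* Example usage (illustrative): mapping hardware queue 2 to virtual
 * queue 1 means writing (1 << 16) | 2 = 0x10002:
 *
 *	echo 65538 > /proc/sys/net/edma/queue_to_virtual_queue_map
 */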

static struct ctl_table edma_table[] = {
	{
		.procname = "default_lan_tag",
		.data = &edma_default_ltag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_lan_vlan
	},
	{
		.procname = "default_wan_tag",
		.data = &edma_default_wtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_wan_vlan
	},
	{
		.procname = "weight_assigned_to_queues",
		.data = &edma_weight_assigned_to_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_weight_assigned_to_queues
	},
	{
		.procname = "queue_to_virtual_queue_map",
		.data = &edma_queue_to_virtual_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_queue_to_virtual_queue_map
	},
	{
		.procname = "enable_stp_rstp",
		.data = &edma_enable_rstp,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_enable_stp_rstp
	},
	{
		.procname = "athr_hdr_eth_type",
		.data = &edma_athr_hdr_eth_type,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_ath_hdr_eth_type
	},
	{
		.procname = "default_group1_vlan_tag",
		.data = &edma_default_group1_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group1_vtag
	},
	{
		.procname = "default_group2_vlan_tag",
		.data = &edma_default_group2_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group2_vtag
	},
	{
		.procname = "default_group3_vlan_tag",
		.data = &edma_default_group3_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group3_vtag
	},
	{
		.procname = "default_group4_vlan_tag",
		.data = &edma_default_group4_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group4_vtag
	},
	{
		.procname = "default_group5_vlan_tag",
		.data = &edma_default_group5_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group5_vtag
	},
	{
		.procname = "default_group1_bmp",
		.data = &edma_default_group1_bmp,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group1_bmp
	},
	{
		.procname = "default_group2_bmp",
		.data = &edma_default_group2_bmp,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group2_bmp
	},
	{
		.procname = "edma_disable_rss",
		.data = &edma_disable_rss,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_disable_rss_func
	},
	{}
};

/* edma_axi_netdev_ops
 * Describe the operations supported by registered netdevices
 */
static const struct net_device_ops edma_axi_netdev_ops = {
	.ndo_open = edma_open,
	.ndo_stop = edma_close,
	.ndo_start_xmit = edma_xmit,
	.ndo_set_mac_address = edma_set_mac_addr,
	.ndo_select_queue = edma_select_xps_queue,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = edma_rx_flow_steer,
	.ndo_register_rfs_filter = edma_register_rfs_filter,
	.ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
#endif
	.ndo_get_stats = edma_get_stats,
	.ndo_change_mtu = edma_change_mtu,
};

/* edma_axi_probe()
 * Initialise an adapter identified by a platform_device structure.
 *
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur in the probe.
 */
static int edma_axi_probe(struct platform_device *pdev)
{
	struct edma_common_info *edma_cinfo;
	struct edma_hw *hw;
	struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
	struct resource *res;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *pnp;
	struct device_node *mdio_node = NULL;
	struct platform_device *mdio_plat = NULL;
	struct mii_bus *miibus = NULL;
	struct edma_mdio_data *mdio_data = NULL;
	int i, j, k, err = 0;
	u32 portid_bmp;
	int idx = 0, idx_mac = 0;

	if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
		dev_err(&pdev->dev, "Invalid CPU Cores\n");
		return -EINVAL;
	}

	if ((num_rxq != 4) && (num_rxq != 8)) {
		dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
		return -EINVAL;
	}

	edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
	if (!edma_cinfo) {
		err = -ENOMEM;
		goto err_alloc;
	}

	edma_cinfo->pdev = pdev;

	of_property_read_u32(np, "qcom,num-gmac", &edma_cinfo->num_gmac);
	if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
		pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
		err = -EINVAL;
		goto err_cinfo;
	}

	/* Initialize the netdev array before allocation
	 * to avoid double free
	 */
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		edma_netdev[i] = NULL;

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
				EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);

		if (!edma_netdev[i]) {
			dev_err(&pdev->dev,
				"net device alloc fails for index=%d\n", i);
			err = -ENODEV;
			goto err_ioremap;
		}

		SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
		platform_set_drvdata(pdev, edma_netdev[i]);
		edma_cinfo->netdev[i] = edma_netdev[i];
	}

	/* Fill ring details */
	edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
	edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
	edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;

	/* Update num rx queues based on module parameter */
	edma_cinfo->num_rx_queues = num_rxq;
	edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);

	edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;

	hw = &edma_cinfo->hw;

	/* Fill HW defaults */
	hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
	hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;

	of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
	of_property_read_u32(np, "qcom,rx-head-buf-size",
			     &hw->rx_head_buff_size);

	if (overwrite_mode) {
		dev_info(&pdev->dev, "page mode overwritten");
		edma_cinfo->page_mode = page_mode;
	}

	if (jumbo_mru)
		edma_cinfo->fraglist_mode = 1;

	if (edma_cinfo->page_mode)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
	else if (edma_cinfo->fraglist_mode)
		hw->rx_head_buff_size = jumbo_mru;
	else if (!hw->rx_head_buff_size)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;

	hw->misc_intr_mask = 0;
	hw->wol_intr_mask = 0;

	hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
	hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;

	/* configure RSS type for the different protocols that can be
	 * supported
	 */
	hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
		EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
		EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(edma_cinfo->hw.hw_addr)) {
		err = PTR_ERR(edma_cinfo->hw.hw_addr);
		goto err_ioremap;
	}

	edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;

	/* Parse tx queue interrupt number from device tree */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);

	/* Parse rx queue interrupt number from device tree.
	 * Here we set j to the point where tx interrupt parsing
	 * left off (i.e. 16) and run the loop from 0 to 7 to parse
	 * the rx interrupt numbers.
	 */
	for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
	     i < edma_cinfo->num_rx_queues; i++) {
		edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
		k += ((num_rxq == 4) ? 2 : 1);
		j += ((num_rxq == 4) ? 2 : 1);
	}
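
	/* Illustrative mapping for num_rxq == 4: j walks platform IRQ
	 * indices 16, 18, 20, 22 (every other entry after the 16 tx IRQs)
	 * and k stores them at rx_irq[0], rx_irq[2], rx_irq[4], rx_irq[6],
	 * matching the per-core rx_start spacing used when the IRQs are
	 * requested below.
	 */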

	edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
	edma_cinfo->rx_page_buffer_len = PAGE_SIZE;

	err = edma_alloc_queues_tx(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of TX queue failed\n");
		goto err_tx_qinit;
	}

	err = edma_alloc_queues_rx(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of RX queue failed\n");
		goto err_rx_qinit;
	}

	err = edma_alloc_tx_rings(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of TX resources failed\n");
		goto err_tx_rinit;
	}

	err = edma_alloc_rx_rings(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of RX resources failed\n");
		goto err_rx_rinit;
	}

	/* Initialize netdev and netdev bitmap for transmit descriptor rings */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];
		int j;

		etdr->netdev_bmp = 0;
		for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
			etdr->netdev[j] = NULL;
			etdr->nq[j] = NULL;
		}
	}

	if (of_property_read_bool(np, "qcom,mdio-supported")) {
		mdio_node = of_find_compatible_node(NULL, NULL,
						    "qcom,ipq40xx-mdio");
		if (!mdio_node) {
			dev_err(&pdev->dev, "cannot find mdio node by phandle");
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		mdio_plat = of_find_device_by_node(mdio_node);
		if (!mdio_plat) {
			dev_err(&pdev->dev,
				"cannot find platform device from mdio node");
			of_node_put(mdio_node);
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		mdio_data = dev_get_drvdata(&mdio_plat->dev);
		if (!mdio_data) {
			dev_err(&pdev->dev,
				"cannot get mii bus reference from device data");
			of_node_put(mdio_node);
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		miibus = mdio_data->mii_bus;
		miibus_gb = mdio_data->mii_bus;
	}

	for_each_available_child_of_node(np, pnp) {
		const char *mac_addr;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx_mac == edma_cinfo->num_gmac) {
			of_node_put(pnp);
			break;
		}

		mac_addr = of_get_mac_address(pnp);
		if (mac_addr)
			memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);

		idx_mac++;
	}

	/* Populate the adapter structure and register the netdevice */
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		int k, m;

		adapter[i] = netdev_priv(edma_netdev[i]);
		adapter[i]->netdev = edma_netdev[i];
		adapter[i]->pdev = pdev;
		for (j = 0; j < CONFIG_NR_CPUS; j++) {
			m = i % 2;
			adapter[i]->tx_start_offset[j] =
				((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
			/* Share the queues with available net-devices.
			 * For instance, with 5 net-devices
			 * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
			 * and eth1/eth3 will get the remaining.
			 */
			for (k = adapter[i]->tx_start_offset[j]; k <
			     (adapter[i]->tx_start_offset[j] + 2); k++) {
				if (edma_fill_netdev(edma_cinfo, k, i, j)) {
					pr_err("Netdev overflow Error\n");
					goto err_register;
				}
			}
		}

		adapter[i]->edma_cinfo = edma_cinfo;
		edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
		edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
		edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;
		edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;
		edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;

#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->hw_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
#endif
		if (edma_cinfo->fraglist_mode) {
			edma_netdev[i]->features |= NETIF_F_FRAGLIST;
			edma_netdev[i]->hw_features |= NETIF_F_FRAGLIST;
			edma_netdev[i]->vlan_features |= NETIF_F_FRAGLIST;
			edma_netdev[i]->wanted_features |= NETIF_F_FRAGLIST;
		}

		edma_set_ethtool_ops(edma_netdev[i]);

		/* Fill in a default MAC address if the device tree
		 * did not provide one
		 */
		if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
			random_ether_addr(edma_netdev[i]->dev_addr);
			pr_info("EDMA using random MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
				*(edma_netdev[i]->dev_addr),
				*(edma_netdev[i]->dev_addr + 1),
				*(edma_netdev[i]->dev_addr + 2),
				*(edma_netdev[i]->dev_addr + 3),
				*(edma_netdev[i]->dev_addr + 4),
				*(edma_netdev[i]->dev_addr + 5));
		}

		err = register_netdev(edma_netdev[i]);
		if (err)
			goto err_register;

		/* carrier off reporting is important to
		 * ethtool even BEFORE open
		 */
		netif_carrier_off(edma_netdev[i]);

		/* Allocate reverse irq cpu mapping structure for
		 * receive queues
		 */
#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->rx_cpu_rmap =
			alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
		if (!edma_netdev[i]->rx_cpu_rmap) {
			err = -ENOMEM;
			goto err_rmap_alloc_fail;
		}
#endif
	}

	for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
		edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;

	for_each_available_child_of_node(np, pnp) {
		const uint32_t *vlan_tag = NULL;
		int len;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx == edma_cinfo->num_gmac)
			break;

		/* Populate port-id to netdev lookup table */
		vlan_tag = of_get_property(pnp, "vlan-tag", &len);
		if (!vlan_tag) {
			pr_err("VLAN tag parsing failed\n");
			goto err_rmap_alloc_fail;
		}

		adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
		vlan_tag++;
		portid_bmp = of_read_number(vlan_tag, 1);
		adapter[idx]->dp_bitmap = portid_bmp;

		portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
		while (portid_bmp) {
			int port_bit = ffs(portid_bmp);

			if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
				goto err_rmap_alloc_fail;
			edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
				edma_netdev[idx];
			portid_bmp &= ~(1 << (port_bit - 1));
		}

		if (of_property_read_u32(pnp, "qcom,poll-required-dynamic",
					 &adapter[idx]->poll_required_dynamic))
			adapter[idx]->poll_required_dynamic = 0;

		if (!of_property_read_u32(pnp, "qcom,poll-required",
					  &adapter[idx]->poll_required)) {
			if (adapter[idx]->poll_required) {
				of_property_read_u32(pnp, "qcom,phy-mdio-addr",
						     &adapter[idx]->phy_mdio_addr);
				of_property_read_u32(pnp, "qcom,forced-speed",
						     &adapter[idx]->forced_speed);
				of_property_read_u32(pnp, "qcom,forced-duplex",
						     &adapter[idx]->forced_duplex);

				/* create a phyid using MDIO bus id
				 * and MDIO bus address
				 */
				snprintf(adapter[idx]->phy_id,
					 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
					 miibus->id,
					 adapter[idx]->phy_mdio_addr);
			}
		} else {
			adapter[idx]->poll_required = 0;
			adapter[idx]->forced_speed = SPEED_1000;
			adapter[idx]->forced_duplex = DUPLEX_FULL;
		}

		idx++;
	}

	edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
							     "net/edma",
							     edma_table);
	if (!edma_cinfo->edma_ctl_table_hdr) {
		dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
		goto err_unregister_sysctl_tbl;
	}

	/* Disable all 16 Tx and 8 Rx irqs */
	edma_irq_disable(edma_cinfo);

	err = edma_reset(edma_cinfo);
	if (err) {
		err = -EIO;
		goto err_reset;
	}

	/* populate per_core_info, do a napi_add, request 16 TX irqs,
	 * 8 RX irqs, do a napi enable
	 */
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		u8 rx_start;

		edma_cinfo->edma_percpu_info[i].napi.state = 0;

		netif_napi_add(edma_netdev[0],
			       &edma_cinfo->edma_percpu_info[i].napi,
			       edma_poll, 64);
		napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
		edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
		edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
			<< (i << EDMA_RX_PER_CPU_MASK_SHIFT);
		edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
		edma_cinfo->edma_percpu_info[i].rx_start =
			i << EDMA_RX_CPU_START_SHIFT;
		rx_start = i << EDMA_RX_CPU_START_SHIFT;
		edma_cinfo->edma_percpu_info[i].tx_status = 0;
		edma_cinfo->edma_percpu_info[i].rx_status = 0;
		edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;

		/* Request irq per core */
		for (j = edma_cinfo->edma_percpu_info[i].tx_start;
		     j < tx_start[i] + 4; j++) {
			snprintf(&edma_tx_irq[j][0], sizeof(edma_tx_irq[0]),
				 "edma_eth_tx%d", j);
			err = request_irq(edma_cinfo->tx_irq[j],
					  edma_interrupt,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
					  IRQF_DISABLED,
#else
					  0,
#endif
					  &edma_tx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
			if (err)
				goto err_reset;
		}

		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < (rx_start +
			  ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
		     j++) {
			snprintf(&edma_rx_irq[j][0], sizeof(edma_rx_irq[0]),
				 "edma_eth_rx%d", j);
			err = request_irq(edma_cinfo->rx_irq[j],
					  edma_interrupt,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
					  IRQF_DISABLED,
#else
					  0,
#endif
					  &edma_rx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
			if (err)
				goto err_reset;
		}

#ifdef CONFIG_RFS_ACCEL
		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < rx_start + 2; j += 2) {
			err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
					       edma_cinfo->rx_irq[j]);
			if (err)
				goto err_rmap_add_fail;
		}
#endif
	}

	/* Used to clear interrupt status, allocate rx buffer,
	 * configure edma descriptors registers
	 */
	err = edma_configure(edma_cinfo);
	if (err) {
		err = -EIO;
		goto err_configure;
	}

	/* Configure RSS indirection table.
	 * 128 hashes will be configured in the following
	 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
	 * and so on
	 */
	for (i = 0; i < EDMA_NUM_IDT; i++)
		edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);

	/* Configure load balance mapping table.
	 * 4 table entries will be configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 * respectively.
	 */
	edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);

	/* Configure Virtual queue for Tx rings.
	 * Users can also change this value at runtime through
	 * a sysctl
	 */
	edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
	edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);

	/* Configure Max AXI Burst write size to 128 bytes */
	edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
		       EDMA_AXIW_MAXWRSIZE_VALUE);

	/* Enable all 16 tx and 8 rx irq masks */
	edma_irq_enable(edma_cinfo);
	edma_enable_tx_ctrl(&edma_cinfo->hw);
	edma_enable_rx_ctrl(&edma_cinfo->hw);

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		u32 port_id;

		if (!(adapter[i]->poll_required)) {
			adapter[i]->phydev = NULL;
		} else {
			adapter[i]->phydev =
				phy_connect(edma_netdev[i],
					    (const char *)adapter[i]->phy_id,
					    &edma_adjust_link,
					    PHY_INTERFACE_MODE_SGMII);
			if (IS_ERR(adapter[i]->phydev)) {
				dev_dbg(&pdev->dev, "PHY attach FAIL\n");
				err = -EIO;
				goto edma_phy_attach_fail;
			} else {
				adapter[i]->phydev->advertising |=
					ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
				adapter[i]->phydev->supported |=
					SUPPORTED_Pause |
					SUPPORTED_Asym_Pause;
				portid_bmp = adapter[i]->dp_bitmap >> 1;
				port_id = ffs(portid_bmp);
				edma_phydev[port_id - 1] = adapter[i]->phydev;
				phy_dev_state[port_id - 1] = 1;
			}
		}
	}

	spin_lock_init(&edma_cinfo->stats_lock);

	init_timer(&edma_stats_timer);
	edma_stats_timer.expires = jiffies + 1*HZ;
	edma_stats_timer.data = (unsigned long)edma_cinfo;
	edma_stats_timer.function = edma_statistics_timer; /* timer handler */
	add_timer(&edma_stats_timer);

	return 0;

edma_phy_attach_fail:
	miibus = NULL;
err_configure:
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
		adapter[i]->netdev->rx_cpu_rmap = NULL;
	}
#endif
err_rmap_add_fail:
	edma_free_irqs(adapter[0]);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
err_reset:
err_unregister_sysctl_tbl:
err_rmap_alloc_fail:
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);
err_register:
err_mdiobus_init_fail:
	edma_free_rx_rings(edma_cinfo);
err_rx_rinit:
	edma_free_tx_rings(edma_cinfo);
err_tx_rinit:
	edma_free_queues(edma_cinfo);
err_rx_qinit:
err_tx_qinit:
	iounmap(edma_cinfo->hw.hw_addr);
err_ioremap:
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		if (edma_netdev[i])
			free_netdev(edma_netdev[i]);
	}
err_cinfo:
	kfree(edma_cinfo);
err_alloc:
	return err;
}

/* edma_axi_remove()
 * Device Removal Routine
 *
 * edma_axi_remove is called by the platform subsystem to alert the driver
 * that it should release a platform device.
 */
static int edma_axi_remove(struct platform_device *pdev)
{
	struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	struct edma_hw *hw = &edma_cinfo->hw;
	int i;

	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

	edma_stop_rx_tx(hw);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

	edma_irq_disable(edma_cinfo);
	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
		edma_netdev[i]->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < EDMA_MAX_PORTID_SUPPORTED; i++) {
		if (edma_phydev[i])
			phy_disconnect(edma_phydev[i]);
	}

	del_timer_sync(&edma_stats_timer);
	edma_free_irqs(adapter);
	unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
	edma_free_tx_resources(edma_cinfo);
	edma_free_rx_resources(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_rx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		free_netdev(edma_netdev[i]);

	kfree(edma_cinfo);

	return 0;
}

static const struct of_device_id edma_of_mtable[] = {
	{.compatible = "qcom,ess-edma" },
	{}
};
MODULE_DEVICE_TABLE(of, edma_of_mtable);

static struct platform_driver edma_axi_driver = {
	.driver = {
		.name = edma_axi_driver_name,
		.of_match_table = edma_of_mtable,
	},
	.probe = edma_axi_probe,
	.remove = edma_axi_remove,
};

module_platform_driver(edma_axi_driver);

MODULE_DESCRIPTION("QCA ESS EDMA driver");
MODULE_LICENSE("Dual BSD/GPL");