/*
 * Copyright (c) 2014 - 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/cpu_rmap.h>
#include <linux/of_net.h>
#include <linux/timer.h>
#include <linux/bitmap.h>
#include "edma.h"
#include "ess_edma.h"

/* Weight round robin and virtual QID mask */
#define EDMA_WRR_VID_SCTL_MASK 0xffff

/* Weight round robin and virtual QID shift */
#define EDMA_WRR_VID_SCTL_SHIFT 16
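
/* Both the weight_assigned_to_queues and queue_to_virtual_queue_map sysctls
 * (registered under net/edma in edma_axi_probe(), i.e. /proc/sys/net/edma/)
 * pack two fields into one integer using the mask/shift above: bits 0-15
 * select the TX queue, and bits 16 and up carry the weight or virtual
 * queue id. Illustrative example:
 *   echo $(((3 << 16) | 5)) > /proc/sys/net/edma/weight_assigned_to_queues
 * assigns weight 3 to TX queue 5.
 */
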
Rakesh Nairc9fc4cf2017-09-11 19:43:23 +053029static const u32 edma_idt_tbl[EDMA_CPU_CORES_SUPPORTED][EDMA_NUM_IDT] = {
Rakesh Nair03824d52017-07-31 17:10:49 +053030
31 /* For 1 core */
32 {
Rakesh Nairc9fc4cf2017-09-11 19:43:23 +053033 0x0, 0x0, 0x0, 0x0,
34 0x0, 0x0, 0x0, 0x0,
35 0x0, 0x0, 0x0, 0x0,
36 0x0, 0x0, 0x0, 0x0
Rakesh Nair03824d52017-07-31 17:10:49 +053037 },
38
39 /* For 2 cores */
40 {
41 0x20202020, 0x20202020, 0x20202020, 0x20202020,
42 0x20202020, 0x20202020, 0x20202020, 0x20202020,
43 0x20202020, 0x20202020, 0x20202020, 0x20202020,
44 0x20202020, 0x20202020, 0x20202020, 0x20202020
45 },
46
47 /* For 3 cores */
48 {
49 0x20420420, 0x04204204, 0x42042042, 0x20420420,
50 0x04204204, 0x42042042, 0x20420420, 0x04204204,
51 0x42042042, 0x20420420, 0x04204204, 0x42042042,
52 0x20420420, 0x04204204, 0x42042042, 0x20420420
53 },
54
55 /* For 4 cores */
56 {
57 0x64206420, 0x64206420, 0x64206420, 0x64206420,
58 0x64206420, 0x64206420, 0x64206420, 0x64206420,
59 0x64206420, 0x64206420, 0x64206420, 0x64206420,
60 0x64206420, 0x64206420, 0x64206420, 0x64206420
61 }
62};
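
/* Each 32-bit IDT word packs eight 4-bit RX queue indices, so the 16 words
 * per row cover the 128 RSS hash buckets programmed in edma_axi_probe().
 * For example, the 2-core word 0x20202020 alternates consecutive hash
 * values between queues 0 and 2, and the 4-core word 0x64206420 cycles
 * through queues 0, 2, 4 and 6.
 */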
63
/* Set the mask for tx-completion mapping for tx packets on each core.
 * We set the tx-mask for each core, depending on the number of cores
 * supported by the platform.
 * For instance, for a platform supporting 2 cores, we have the following map:
 * 0x00F0: for tx on Core0, map the tx-completion to Core1
 * 0x000F: for tx on Core1, map the tx-completion to Core0
 * 0xF000: for tx on Core2, map the tx-completion to Core3
 * 0x0F00: for tx on Core3, map the tx-completion to Core2
 */
73static const u32 edma_tx_mask[EDMA_CPU_CORES_SUPPORTED][4] = {
74 {
75 0x000F, 0x00F0, 0x0F00, 0xF000 /* For 1 core */
76 },
77
78 {
79 0x00F0, 0x000F, 0xF000, 0x0F00 /* For 2 cores */
80 },
81
82 {
83 0x00F0, 0x0F00, 0x000F, 0xF000 /* For 3 cores */
84 },
85
86 {
87 0x0F00, 0xF000, 0x000F, 0x00F0 /* For 4 cores */
88 },
89};
90
/* tx_comp_start is the first queue on which Tx-completion for that core
 * takes place; each core handles the 4 queues starting from its
 * tx_comp_start.
 *
 * For instance, for a platform supporting 2 cores, we have the following map:
 * 4: for tx on Core0, Tx-completion is handled on Core1,
 *    so tx_comp_start commences from tx_queue 4
 * 0: for tx on Core1, Tx-completion is handled on Core0,
 *    so tx_comp_start commences from tx_queue 0
 * 12: for tx on Core2, Tx-completion is handled on Core3,
 *     so tx_comp_start commences from tx_queue 12
 * 8: for tx on Core3, Tx-completion is handled on Core2,
 *    so tx_comp_start commences from tx_queue 8
 */
105static const u32 edma_tx_comp_start[EDMA_CPU_CORES_SUPPORTED][4] = {
106 {
107 0, 4, 8, 12 /* For 1 core */
108 },
109
110 {
111 4, 0, 12, 8 /* For 2 cores */
112 },
113
114 {
115 4, 8, 0, 12 /* For 3 cores */
116 },
117
118 {
119 8, 12, 0, 4 /* For 4 cores */
120 },
121};
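
/* Note: edma_tx_comp_start and edma_tx_mask describe the same mapping.
 * For example, the 2-core Core0 mask 0x00F0 covers TX queues 4-7, so the
 * corresponding tx_comp_start is 4.
 */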
122
/* We export these values for smp_affinity as sysctls
 * which are read and set by the qca-edma script at run-time.
 */
126static const u32 edma_tx_affinity[EDMA_CPU_CORES_SUPPORTED][4] = {
127 /* Order is from left to right; Core0 to Core3 */
128 {
129 1, 2, 4, 8 /* For 1 core */
130 },
131
132 {
133 2, 1, 8, 4 /* For 2 cores */
134 },
135
136 {
137 2, 4, 1, 8 /* For 3 cores */
138 },
139
140 {
141 4, 8, 1, 2 /* For 4 cores */
142 },
143};
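
/* Each value above is a single-CPU affinity bitmask (1 = CPU0, 2 = CPU1,
 * 4 = CPU2, 8 = CPU3). The qca-edma script is expected to read these back
 * through the coreX_completion_affinity sysctls below and apply them to the
 * matching TX-completion IRQs (e.g. via /proc/irq/<irq>/smp_affinity); the
 * exact script behaviour is an assumption outside this driver.
 */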
144
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530145char edma_axi_driver_name[] = "ess_edma";
146static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
147 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
148
149static u32 edma_hw_addr;
150
151struct timer_list edma_stats_timer;
152static struct mii_bus *miibus_gb;
153
154char edma_tx_irq[16][64];
155char edma_rx_irq[8][64];
156struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
Rakesh Nair888af952017-06-30 18:41:58 +0530157
158extern u8 edma_dscp2ac_tbl[EDMA_PRECEDENCE_MAX];
Rakesh Nair1c6a18c2017-08-02 21:27:06 +0530159extern u32 edma_per_prec_stats_enable;
160extern u32 edma_prec_stats_reset;
161extern u32 edma_iad_stats_enable;
162extern u32 edma_iad_stats_reset;
163extern u32 edma_max_valid_ifd_usec;
164static u32 edma_print_flow_table __read_mostly = 0;
165extern struct edma_flow_attrib edma_flow_tbl[EDMA_MAX_IAD_FLOW_STATS_SUPPORTED];
Rakesh Nair888af952017-06-30 18:41:58 +0530166
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530167static struct phy_device *edma_phydev[EDMA_MAX_PORTID_SUPPORTED];
168static int edma_link_detect_bmp;
169static int phy_dev_state[EDMA_MAX_PORTID_SUPPORTED];
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530170
171static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
172static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
173static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
174static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
175static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
176static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
177static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
Rakesh Nair8016fbd2018-01-03 15:46:06 +0530178static u32 edma_num_cores __read_mostly = EDMA_CPU_CORES_SUPPORTED;
179static u32 edma_core_completion_affinity[EDMA_CPU_CORES_SUPPORTED];
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530180
181static u32 edma_default_group1_bmp __read_mostly = EDMA_DEFAULT_GROUP1_BMP;
182static u32 edma_default_group2_bmp __read_mostly = EDMA_DEFAULT_GROUP2_BMP;
183static u32 edma_disable_rss __read_mostly = EDMA_DEFAULT_DISABLE_RSS;
184
Rakesh Naird4a11502017-11-07 17:02:11 +0530185u32 edma_disable_queue_stop __read_mostly = EDMA_DEFAULT_DISABLE_QUEUE_STOP;
186
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530187static int edma_weight_assigned_to_q __read_mostly;
188static int edma_queue_to_virtual_q __read_mostly;
189static bool edma_enable_rstp __read_mostly;
190static int edma_athr_hdr_eth_type __read_mostly;
191
192static int page_mode;
193module_param(page_mode, int, 0);
194MODULE_PARM_DESC(page_mode, "enable page mode");
195
196static int overwrite_mode;
197module_param(overwrite_mode, int, 0);
198MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");
199
200static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
201module_param(jumbo_mru, int, 0);
202MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");
203
204static int num_rxq = 4;
205module_param(num_rxq, int, 0);
206MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
207
208void edma_write_reg(u16 reg_addr, u32 reg_value)
209{
210 writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
211}
212
213void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
214{
215 *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
216}
217
218/* edma_change_tx_coalesce()
219 * change tx interrupt moderation timer
220 */
221void edma_change_tx_coalesce(int usecs)
222{
223 u32 reg_value;
224
	/* Right-shift the user-supplied value by 1 because the IMT timer
	 * resolution is 2 usecs: one count of this register corresponds
	 * to 2 usecs (e.g. usecs = 100 programs 50 counts).
	 */
229 edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
230 reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
231 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
232}
233
234/* edma_change_rx_coalesce()
235 * change rx interrupt moderation timer
236 */
237void edma_change_rx_coalesce(int usecs)
238{
239 u32 reg_value;
240
	/* Right-shift the user-supplied value by 1 because the IMT timer
	 * resolution is 2 usecs: one count of this register corresponds
	 * to 2 usecs (e.g. usecs = 100 programs 50 counts).
	 */
245 edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
246 reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
247 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
248}
249
250/* edma_get_tx_rx_coalesce()
251 * Get tx/rx interrupt moderation value
252 */
253void edma_get_tx_rx_coalesce(u32 *reg_val)
254{
255 edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
256}
257
258void edma_read_append_stats(struct edma_common_info *edma_cinfo)
259{
Bhaskar Valabojue429bab2017-03-15 09:01:23 +0530260 u64 *p;
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530261 int i;
262 u32 stat;
263
264 spin_lock(&edma_cinfo->stats_lock);
Bhaskar Valabojue429bab2017-03-15 09:01:23 +0530265 p = (u64 *)&(edma_cinfo->edma_ethstats);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530266
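	/* The loops below accumulate the per-queue TX/RX packet and byte
	 * counters in register order; this relies on the u64 fields of
	 * struct edma_ethstats being laid out in exactly the same order.
	 */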
267 for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
268 edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
269 *p += stat;
270 p++;
271 }
272
273 for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
274 edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
275 *p += stat;
276 p++;
277 }
278
279 for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
280 edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
281 *p += stat;
282 p++;
283 }
284
285 for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
286 edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
287 *p += stat;
288 p++;
289 }
290
291 spin_unlock(&edma_cinfo->stats_lock);
292}
293
294static void edma_statistics_timer(unsigned long data)
295{
296 struct edma_common_info *edma_cinfo = (struct edma_common_info *)data;
297
298 edma_read_append_stats(edma_cinfo);
299
300 mod_timer(&edma_stats_timer, jiffies + 1*HZ);
301}
302
303static int edma_enable_stp_rstp(struct ctl_table *table, int write,
304 void __user *buffer, size_t *lenp,
305 loff_t *ppos)
306{
307 int ret;
308
309 ret = proc_dointvec(table, write, buffer, lenp, ppos);
310 if (write)
311 edma_set_stp_rstp(edma_enable_rstp);
312
313 return ret;
314}
315
316static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
317 void __user *buffer, size_t *lenp,
318 loff_t *ppos)
319{
320 int ret;
321
322 ret = proc_dointvec(table, write, buffer, lenp, ppos);
323 if (write)
324 edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);
325
326 return ret;
327}
328
329static int edma_get_port_from_phydev(struct phy_device *phydev)
330{
331 int i;
332
333 for (i = 0; i < EDMA_MAX_PORTID_SUPPORTED; i++) {
334 if (phydev == edma_phydev[i])
335 return i;
336 }
337
338 pr_err("Invalid PHY devive\n");
339 return -1;
340}
341
342static int edma_is_port_used(int portid)
343{
344 int used_portid_bmp;
345 used_portid_bmp = edma_link_detect_bmp >> 1;
346
347 while (used_portid_bmp) {
348 int port_bit_set = ffs(used_portid_bmp);
349 if (port_bit_set == portid)
350 return 1;
351 used_portid_bmp &= ~(1 << (port_bit_set - 1));
352 }
353
354 return 0;
355}
356
357static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
358 void __user *buffer, size_t *lenp,
359 loff_t *ppos)
360{
361 struct edma_adapter *adapter;
362 int ret;
363
364 if (!edma_netdev[1]) {
365 pr_err("Netdevice for default_lan does not exist\n");
366 return -1;
367 }
368
369 adapter = netdev_priv(edma_netdev[1]);
370
371 ret = proc_dointvec(table, write, buffer, lenp, ppos);
372
373 if (write)
374 adapter->default_vlan_tag = edma_default_ltag;
375
376 return ret;
377}
378
379static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
380 void __user *buffer, size_t *lenp,
381 loff_t *ppos)
382{
383 struct edma_adapter *adapter;
384 int ret;
385
386 if (!edma_netdev[0]) {
387 pr_err("Netdevice for default_wan does not exist\n");
388 return -1;
389 }
390
391 adapter = netdev_priv(edma_netdev[0]);
392
393 ret = proc_dointvec(table, write, buffer, lenp, ppos);
394
395 if (write)
396 adapter->default_vlan_tag = edma_default_wtag;
397
398 return ret;
399}
400
401static int edma_change_group1_vtag(struct ctl_table *table, int write,
402 void __user *buffer, size_t *lenp,
403 loff_t *ppos)
404{
405 struct edma_adapter *adapter;
406 struct edma_common_info *edma_cinfo;
407 int ret;
408
409 if (!edma_netdev[0]) {
410 pr_err("Netdevice for Group 1 does not exist\n");
411 return -1;
412 }
413
414 adapter = netdev_priv(edma_netdev[0]);
415 edma_cinfo = adapter->edma_cinfo;
416
417 ret = proc_dointvec(table, write, buffer, lenp, ppos);
418
419 if (write)
420 adapter->default_vlan_tag = edma_default_group1_vtag;
421
422 return ret;
423}
424
425static int edma_change_group2_vtag(struct ctl_table *table, int write,
426 void __user *buffer, size_t *lenp,
427 loff_t *ppos)
428{
429 struct edma_adapter *adapter;
430 struct edma_common_info *edma_cinfo;
431 int ret;
432
433 if (!edma_netdev[1]) {
434 pr_err("Netdevice for Group 2 does not exist\n");
435 return -1;
436 }
437
438 adapter = netdev_priv(edma_netdev[1]);
439 edma_cinfo = adapter->edma_cinfo;
440
441 ret = proc_dointvec(table, write, buffer, lenp, ppos);
442
443 if (write)
444 adapter->default_vlan_tag = edma_default_group2_vtag;
445
446 return ret;
447}
448
449static int edma_change_group3_vtag(struct ctl_table *table, int write,
450 void __user *buffer, size_t *lenp,
451 loff_t *ppos)
452{
453 struct edma_adapter *adapter;
454 struct edma_common_info *edma_cinfo;
455 int ret;
456
457 if (!edma_netdev[2]) {
458 pr_err("Netdevice for Group 3 does not exist\n");
459 return -1;
460 }
461
462 adapter = netdev_priv(edma_netdev[2]);
463 edma_cinfo = adapter->edma_cinfo;
464
465 ret = proc_dointvec(table, write, buffer, lenp, ppos);
466
467 if (write)
468 adapter->default_vlan_tag = edma_default_group3_vtag;
469
470 return ret;
471}
472
473static int edma_change_group4_vtag(struct ctl_table *table, int write,
474 void __user *buffer, size_t *lenp,
475 loff_t *ppos)
476{
477 struct edma_adapter *adapter;
478 struct edma_common_info *edma_cinfo;
479 int ret;
480
481 if (!edma_netdev[3]) {
482 pr_err("Netdevice for Group 4 does not exist\n");
483 return -1;
484 }
485
486 adapter = netdev_priv(edma_netdev[3]);
487 edma_cinfo = adapter->edma_cinfo;
488
489 ret = proc_dointvec(table, write, buffer, lenp, ppos);
490
491 if (write)
492 adapter->default_vlan_tag = edma_default_group4_vtag;
493
494 return ret;
495}
496
497static int edma_change_group5_vtag(struct ctl_table *table, int write,
498 void __user *buffer, size_t *lenp,
499 loff_t *ppos)
500{
501 struct edma_adapter *adapter;
502 struct edma_common_info *edma_cinfo;
503 int ret;
504
505 if (!edma_netdev[4]) {
506 pr_err("Netdevice for Group 5 does not exist\n");
507 return -1;
508 }
509
510 adapter = netdev_priv(edma_netdev[4]);
511 edma_cinfo = adapter->edma_cinfo;
512
513 ret = proc_dointvec(table, write, buffer, lenp, ppos);
514
515 if (write)
516 adapter->default_vlan_tag = edma_default_group5_vtag;
517
518 return ret;
519}
520
521static int edma_change_group1_bmp(struct ctl_table *table, int write,
522 void __user *buffer, size_t *lenp, loff_t *ppos)
523{
524 struct edma_adapter *adapter;
525 struct edma_common_info *edma_cinfo;
526 struct net_device *ndev;
527 struct phy_device *phydev;
528 int ret, num_ports_enabled;
529 u32 portid_bmp, port_bit, prev_bmp, port_id;
530
531 ndev = edma_netdev[0];
532 if (!ndev) {
533 pr_err("Netdevice for Group 0 does not exist\n");
534 return -1;
535 }
536
537 prev_bmp = edma_default_group1_bmp;
538
539 ret = proc_dointvec(table, write, buffer, lenp, ppos);
540 if ((!write) || (prev_bmp == edma_default_group1_bmp))
541 return ret;
542
543 adapter = netdev_priv(ndev);
544 edma_cinfo = adapter->edma_cinfo;
545
546 /* We ignore the bit for CPU Port */
547 portid_bmp = edma_default_group1_bmp >> 1;
548 port_bit = ffs(portid_bmp);
549 if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
550 return -1;
551
552 /* If this group has no ports,
553 * we disable polling for the adapter, stop the queues and return
554 */
555 if (!port_bit) {
556 adapter->dp_bitmap = edma_default_group1_bmp;
557 if (adapter->poll_required) {
Rakesh Naired29f6b2017-04-04 15:48:08 +0530558 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530559 adapter->poll_required = 0;
Rakesh Naired29f6b2017-04-04 15:48:08 +0530560 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530561 adapter->link_state = __EDMA_LINKDOWN;
562 netif_carrier_off(ndev);
563 netif_tx_stop_all_queues(ndev);
564 }
565 return 0;
566 }
567
568 /* Our array indexes are for 5 ports (0 - 4) */
569 port_bit--;
570 edma_link_detect_bmp = 0;
571
572 /* Do we have more ports in this group */
573 num_ports_enabled = bitmap_weight((long unsigned int *)&portid_bmp, 32);
574
	/* If this group has more than one port, we disable polling for
	 * the adapter (as link detection must be disabled), stop the phy
	 * state machine of the phy previously attached to the group, and
	 * start the queues.
	 */
580 if (num_ports_enabled > 1) {
Rakesh Naired29f6b2017-04-04 15:48:08 +0530581 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530582 if (adapter->poll_required) {
583 adapter->poll_required = 0;
584 if (adapter->phydev) {
585 port_id = edma_get_port_from_phydev(
586 adapter->phydev);
587
588 /* We check if phydev attached to this group is
589 * already started and if yes, we stop
590 * the state machine for the phy
591 */
592 if (phy_dev_state[port_id]) {
593 phy_stop_machine(adapter->phydev);
594 phy_dev_state[port_id] = 0;
595 }
596
597 adapter->phydev = NULL;
598 }
599
600 /* Start the tx queues for this netdev
601 * with link detection disabled
602 */
603 if (adapter->link_state == __EDMA_LINKDOWN) {
604 adapter->link_state = __EDMA_LINKUP;
605 netif_tx_start_all_queues(ndev);
606 netif_carrier_on(ndev);
607 }
608 }
Rakesh Naired29f6b2017-04-04 15:48:08 +0530609 mutex_unlock(&adapter->poll_mutex);
610
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530611 goto set_bitmap;
612 }
613
Rakesh Naired29f6b2017-04-04 15:48:08 +0530614 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530615 adapter->poll_required = adapter->poll_required_dynamic;
Rakesh Naired29f6b2017-04-04 15:48:08 +0530616 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530617
618 if (!adapter->poll_required)
619 goto set_bitmap;
620
621 phydev = adapter->phydev;
622
623 /* If this group has only one port,
624 * if phydev exists we start the phy state machine
625 * and if it doesn't we create a phydev and start it.
626 */
627 if (edma_phydev[port_bit]) {
628 adapter->phydev = edma_phydev[port_bit];
629 set_bit(port_bit, (long unsigned int*)&edma_link_detect_bmp);
630
631 /* If the Phy device has changed group,
632 * we need to reassign the netdev
633 */
634 if (adapter->phydev->attached_dev != ndev)
635 adapter->phydev->attached_dev = ndev;
636
637 if (!phy_dev_state[port_bit]) {
638 phy_start_machine(adapter->phydev);
639 phy_dev_state[port_bit] = 1;
640 }
641 } else {
642 snprintf(adapter->phy_id,
643 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
644 miibus_gb->id,
645 port_bit);
646
647 adapter->phydev = phy_connect(ndev,
648 (const char *)adapter->phy_id,
649 &edma_adjust_link,
650 PHY_INTERFACE_MODE_SGMII);
651
652 if (IS_ERR(adapter->phydev)) {
653 adapter->phydev = phydev;
654 pr_err("PHY attach FAIL for port %d", port_bit);
655 return -1;
656 }
657
658 if (adapter->phydev->attached_dev != ndev)
659 adapter->phydev->attached_dev = ndev;
660
661 edma_phydev[port_bit] = adapter->phydev;
662 phy_dev_state[port_bit] = 1;
663 set_bit(port_bit, (long unsigned int *)&edma_link_detect_bmp);
664 adapter->phydev->advertising |=
665 (ADVERTISED_Pause |
666 ADVERTISED_Asym_Pause);
667 adapter->phydev->supported |=
668 (SUPPORTED_Pause |
669 SUPPORTED_Asym_Pause);
670 phy_start(adapter->phydev);
671 phy_start_aneg(adapter->phydev);
672 }
673
674 /* We check if this phydev is in use by other Groups
675 * and stop phy machine only if it is not stopped
676 */
677 if (phydev) {
678 port_id = edma_get_port_from_phydev(phydev);
679 if (phy_dev_state[port_id]) {
680 phy_stop_machine(phydev);
681 phy_dev_state[port_id] = 0;
682 }
683 }
684
Rakesh Naired29f6b2017-04-04 15:48:08 +0530685 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530686 adapter->poll_required = 1;
Rakesh Naired29f6b2017-04-04 15:48:08 +0530687 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530688 adapter->link_state = __EDMA_LINKDOWN;
689
690set_bitmap:
691 while (portid_bmp) {
692 int port_bit_set = ffs(portid_bmp);
693 edma_cinfo->portid_netdev_lookup_tbl[port_bit_set] = ndev;
694 portid_bmp &= ~(1 << (port_bit_set - 1));
695 }
696
697 adapter->dp_bitmap = edma_default_group1_bmp;
698
699 return 0;
700}
701
702static int edma_change_group2_bmp(struct ctl_table *table, int write,
703 void __user *buffer, size_t *lenp, loff_t *ppos)
704{
705 struct edma_adapter *adapter;
706 struct edma_common_info *edma_cinfo;
707 struct net_device *ndev;
708 struct phy_device *phydev;
709 int ret;
710 u32 prev_bmp, portid_bmp, port_bit, num_ports_enabled, port_id;
711
712 ndev = edma_netdev[1];
713 if (!ndev) {
714 pr_err("Netdevice for Group 1 does not exist\n");
715 return -1;
716 }
717
718 prev_bmp = edma_default_group2_bmp;
719
720 ret = proc_dointvec(table, write, buffer, lenp, ppos);
721 if ((!write) || (prev_bmp == edma_default_group2_bmp))
722 return ret;
723
724 adapter = netdev_priv(ndev);
725 edma_cinfo = adapter->edma_cinfo;
726
727 /* We ignore the bit for CPU Port */
728 portid_bmp = edma_default_group2_bmp >> 1;
729 port_bit = ffs(portid_bmp);
730 if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
731 return -1;
732
733 /* If this group has no ports,
734 * we disable polling for the adapter, stop the queues and return
735 */
736 if (!port_bit) {
737 adapter->dp_bitmap = edma_default_group2_bmp;
738 if (adapter->poll_required) {
Rakesh Naired29f6b2017-04-04 15:48:08 +0530739 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530740 adapter->poll_required = 0;
Rakesh Naired29f6b2017-04-04 15:48:08 +0530741 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530742 adapter->link_state = __EDMA_LINKDOWN;
743 netif_carrier_off(ndev);
744 netif_tx_stop_all_queues(ndev);
745 }
746 return 0;
747 }
748
749 /* Our array indexes are for 5 ports (0 - 4) */
750 port_bit--;
751
752 /* Do we have more ports in this group */
753 num_ports_enabled = bitmap_weight((long unsigned int *)&portid_bmp, 32);
754
	/* If this group has more than one port, we disable polling for
	 * the adapter (as link detection must be disabled), stop the phy
	 * state machine of the phy previously attached to the group, and
	 * start the queues.
	 */
760 if (num_ports_enabled > 1) {
Rakesh Naired29f6b2017-04-04 15:48:08 +0530761 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530762 if (adapter->poll_required) {
763 adapter->poll_required = 0;
764 if (adapter->phydev) {
765 port_id = edma_get_port_from_phydev(
766 adapter->phydev);
767
768 /* We check if this phydev is in use by
769 * other Groups and stop phy machine only
770 * if that is NOT the case
771 */
772 if (!edma_is_port_used(port_id)) {
773 if (phy_dev_state[port_id]) {
774 phy_stop_machine(
775 adapter->phydev);
776 phy_dev_state[port_id] = 0;
777 }
778 }
779
780 adapter->phydev = NULL;
781 }
782
783 /* Start the tx queues for this netdev
784 * with link detection disabled
785 */
786 if (adapter->link_state == __EDMA_LINKDOWN) {
787 adapter->link_state = __EDMA_LINKUP;
788 netif_carrier_on(ndev);
789 netif_tx_start_all_queues(ndev);
790 }
791 }
Rakesh Naired29f6b2017-04-04 15:48:08 +0530792 mutex_unlock(&adapter->poll_mutex);
793
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530794 goto set_bitmap;
795 }
796
Rakesh Naired29f6b2017-04-04 15:48:08 +0530797 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530798 adapter->poll_required = adapter->poll_required_dynamic;
Rakesh Naired29f6b2017-04-04 15:48:08 +0530799 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530800
801 if (!adapter->poll_required)
802 goto set_bitmap;
803
804 phydev = adapter->phydev;
805
806 /* If this group has only one port,
807 * if phydev exists we start the phy state machine
808 * and if it doesn't we create a phydev and start it.
809 */
810 if (edma_phydev[port_bit]) {
811 adapter->phydev = edma_phydev[port_bit];
812
813 /* If the Phy device has changed group,
814 * we need to reassign the netdev
815 */
816 if (adapter->phydev->attached_dev != ndev)
817 adapter->phydev->attached_dev = ndev;
818
819 if (!phy_dev_state[port_bit]) {
820 phy_start_machine(adapter->phydev);
821 phy_dev_state[port_bit] = 1;
822 set_bit(port_bit,
823 (long unsigned int *)&edma_link_detect_bmp);
824 }
825 } else {
826 snprintf(adapter->phy_id,
827 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
828 miibus_gb->id,
829 port_bit);
830
831 adapter->phydev = phy_connect(ndev,
832 (const char *)adapter->phy_id,
833 &edma_adjust_link,
834 PHY_INTERFACE_MODE_SGMII);
835
836 if (IS_ERR(adapter->phydev)) {
837 adapter->phydev = phydev;
838 pr_err("PHY attach FAIL for port %d", port_bit);
839 return -1;
840 }
841
842 if (adapter->phydev->attached_dev != ndev)
843 adapter->phydev->attached_dev = ndev;
844
845 edma_phydev[port_bit] = adapter->phydev;
846 phy_dev_state[port_bit] = 1;
847 set_bit(port_bit, (long unsigned int *)&edma_link_detect_bmp);
848 adapter->phydev->advertising |=
849 (ADVERTISED_Pause |
850 ADVERTISED_Asym_Pause);
851 adapter->phydev->supported |=
852 (SUPPORTED_Pause |
853 SUPPORTED_Asym_Pause);
854 phy_start(adapter->phydev);
855 phy_start_aneg(adapter->phydev);
856 }
857
858 /* We check if this phydev is in use by other Groups
859 * and stop phy machine only if that is NOT the case
860 */
861 if (phydev) {
862 port_id = edma_get_port_from_phydev(phydev);
863 if (!edma_is_port_used(port_id)) {
864 if (phy_dev_state[port_id]) {
865 phy_stop_machine(phydev);
866 phy_dev_state[port_id] = 0;
867 }
868 }
869 }
870
Rakesh Naired29f6b2017-04-04 15:48:08 +0530871 mutex_lock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530872 adapter->poll_required = 1;
Rakesh Naired29f6b2017-04-04 15:48:08 +0530873 mutex_unlock(&adapter->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530874 adapter->link_state = __EDMA_LINKDOWN;
875
876set_bitmap:
877 while (portid_bmp) {
878 int port_bit_set = ffs(portid_bmp);
879 edma_cinfo->portid_netdev_lookup_tbl[port_bit_set] = ndev;
880 portid_bmp &= ~(1 << (port_bit_set - 1));
881 }
882
883 adapter->dp_bitmap = edma_default_group2_bmp;
884
885 return 0;
886}
887
888static int edma_disable_rss_func(struct ctl_table *table, int write,
889 void __user *buffer, size_t *lenp,
890 loff_t *ppos)
891{
892 struct edma_adapter *adapter;
893 struct edma_common_info *edma_cinfo;
894 struct edma_hw *hw;
895 int ret;
896
897 if (!edma_netdev[0]) {
898 pr_err("Invalid Netdevice\n");
899 return -1;
900 }
901
902 adapter = netdev_priv(edma_netdev[0]);
903 edma_cinfo = adapter->edma_cinfo;
904 hw = &edma_cinfo->hw;
905
906 ret = proc_dointvec(table, write, buffer, lenp, ppos);
907
908 if ((!write) || (ret))
909 return ret;
910
911 switch (edma_disable_rss) {
912 case EDMA_RSS_DISABLE:
913 hw->rss_type = 0;
914 edma_write_reg(EDMA_REG_RSS_TYPE, hw->rss_type);
915 break;
916 case EDMA_RSS_ENABLE:
917 hw->rss_type = EDMA_RSS_TYPE_IPV4TCP |
918 EDMA_RSS_TYPE_IPV6_TCP |
919 EDMA_RSS_TYPE_IPV4_UDP |
920 EDMA_RSS_TYPE_IPV6UDP |
921 EDMA_RSS_TYPE_IPV4 |
922 EDMA_RSS_TYPE_IPV6;
923 edma_write_reg(EDMA_REG_RSS_TYPE, hw->rss_type);
924 break;
925 default:
926 pr_err("Invalid input\n");
927 ret = -1;
928 break;
929 }
930
931 return ret;
932}
933
934static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
935 void __user *buffer, size_t *lenp,
936 loff_t *ppos)
937{
938 int ret, queue_id, weight;
939 u32 reg_data, data, reg_addr;
940
941 ret = proc_dointvec(table, write, buffer, lenp, ppos);
942 if (write) {
943 queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
944 if (queue_id < 0 || queue_id > 15) {
945 pr_err("queue_id not within desired range\n");
946 return -EINVAL;
947 }
948
949 weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
950 if (weight < 0 || weight > 0xF) {
951 pr_err("queue_id not within desired range\n");
952 return -EINVAL;
953 }
954
955 data = weight << EDMA_WRR_SHIFT(queue_id);
956
957 reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
958 edma_read_reg(reg_addr, &reg_data);
959 reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id));
960 edma_write_reg(reg_addr, data | reg_data);
961 }
962
963 return ret;
964}
965
966static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
967 void __user *buffer, size_t *lenp,
968 loff_t *ppos)
969{
970 int ret, queue_id, virtual_qid;
971 u32 reg_data, data, reg_addr;
972
973 ret = proc_dointvec(table, write, buffer, lenp, ppos);
974 if (write) {
975 queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
976 if (queue_id < 0 || queue_id > 15) {
977 pr_err("queue_id not within desired range\n");
978 return -EINVAL;
979 }
980
981 virtual_qid = edma_queue_to_virtual_q >>
982 EDMA_WRR_VID_SCTL_SHIFT;
Swaraj Sha28bca2e2017-11-24 14:17:22 +0530983 if (virtual_qid < 0 || virtual_qid > 7) {
			pr_err("virtual queue id not within desired range\n");
985 return -EINVAL;
986 }
987
988 data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);
989
Swaraj Sha28bca2e2017-11-24 14:17:22 +0530990 reg_addr = EDMA_REG_VQ_CTRL0 + ((queue_id & ~0x7) >> 1);
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530991 edma_read_reg(reg_addr, &reg_data);
Swaraj Sha28bca2e2017-11-24 14:17:22 +0530992 reg_data &= ~(0x7 << EDMA_VQ_ID_SHIFT(queue_id));
Rakesh Nair9bcf2602017-01-06 16:02:16 +0530993 edma_write_reg(reg_addr, data | reg_data);
994 }
995
996 return ret;
997}
998
Rakesh Naird4a11502017-11-07 17:02:11 +0530999static int edma_disable_queue_stop_func(struct ctl_table *table, int write,
1000 void __user *buffer, size_t *lenp,
1001 loff_t *ppos)
1002{
1003 struct edma_adapter *adapter;
1004 int ret;
1005
1006 adapter = netdev_priv(edma_netdev[0]);
1007
1008 ret = proc_dointvec(table, write, buffer, lenp, ppos);
1009
1010 return ret;
1011}
1012
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301013static struct ctl_table edma_table[] = {
1014 {
1015 .procname = "default_lan_tag",
1016 .data = &edma_default_ltag,
1017 .maxlen = sizeof(int),
1018 .mode = 0644,
1019 .proc_handler = edma_change_default_lan_vlan
1020 },
1021 {
1022 .procname = "default_wan_tag",
1023 .data = &edma_default_wtag,
1024 .maxlen = sizeof(int),
1025 .mode = 0644,
1026 .proc_handler = edma_change_default_wan_vlan
1027 },
1028 {
1029 .procname = "weight_assigned_to_queues",
1030 .data = &edma_weight_assigned_to_q,
1031 .maxlen = sizeof(int),
1032 .mode = 0644,
1033 .proc_handler = edma_weight_assigned_to_queues
1034 },
1035 {
1036 .procname = "queue_to_virtual_queue_map",
1037 .data = &edma_queue_to_virtual_q,
1038 .maxlen = sizeof(int),
1039 .mode = 0644,
1040 .proc_handler = edma_queue_to_virtual_queue_map
1041 },
1042 {
1043 .procname = "enable_stp_rstp",
1044 .data = &edma_enable_rstp,
1045 .maxlen = sizeof(int),
1046 .mode = 0644,
1047 .proc_handler = edma_enable_stp_rstp
1048 },
1049 {
1050 .procname = "athr_hdr_eth_type",
1051 .data = &edma_athr_hdr_eth_type,
1052 .maxlen = sizeof(int),
1053 .mode = 0644,
1054 .proc_handler = edma_ath_hdr_eth_type
1055 },
1056 {
1057 .procname = "default_group1_vlan_tag",
1058 .data = &edma_default_group1_vtag,
1059 .maxlen = sizeof(int),
1060 .mode = 0644,
1061 .proc_handler = edma_change_group1_vtag
1062 },
1063 {
1064 .procname = "default_group2_vlan_tag",
1065 .data = &edma_default_group2_vtag,
1066 .maxlen = sizeof(int),
1067 .mode = 0644,
1068 .proc_handler = edma_change_group2_vtag
1069 },
1070 {
1071 .procname = "default_group3_vlan_tag",
1072 .data = &edma_default_group3_vtag,
1073 .maxlen = sizeof(int),
1074 .mode = 0644,
1075 .proc_handler = edma_change_group3_vtag
1076 },
1077 {
1078 .procname = "default_group4_vlan_tag",
1079 .data = &edma_default_group4_vtag,
1080 .maxlen = sizeof(int),
1081 .mode = 0644,
1082 .proc_handler = edma_change_group4_vtag
1083 },
1084 {
1085 .procname = "default_group5_vlan_tag",
1086 .data = &edma_default_group5_vtag,
1087 .maxlen = sizeof(int),
1088 .mode = 0644,
1089 .proc_handler = edma_change_group5_vtag
1090 },
1091 {
1092 .procname = "default_group1_bmp",
1093 .data = &edma_default_group1_bmp,
1094 .maxlen = sizeof(int),
1095 .mode = 0644,
1096 .proc_handler = edma_change_group1_bmp
1097 },
1098 {
1099 .procname = "default_group2_bmp",
1100 .data = &edma_default_group2_bmp,
1101 .maxlen = sizeof(int),
1102 .mode = 0644,
1103 .proc_handler = edma_change_group2_bmp
1104 },
1105 {
1106 .procname = "edma_disable_rss",
1107 .data = &edma_disable_rss,
1108 .maxlen = sizeof(int),
1109 .mode = 0644,
1110 .proc_handler = edma_disable_rss_func
1111 },
Rakesh Nair888af952017-06-30 18:41:58 +05301112 {
1113 .procname = "dscp2ac",
1114 .data = &edma_dscp2ac_tbl,
1115 .maxlen = sizeof(int),
1116 .mode = 0644,
1117 .proc_handler = edma_dscp2ac_mapping_update
1118 },
1119 {
1120 .procname = "per_prec_stats_enable",
1121 .data = &edma_per_prec_stats_enable,
1122 .maxlen = sizeof(int),
1123 .mode = 0644,
1124 .proc_handler = edma_per_prec_stats_enable_handler,
1125 },
1126 {
1127 .procname = "per_prec_stats_reset",
1128 .data = &edma_prec_stats_reset,
1129 .maxlen = sizeof(int),
1130 .mode = 0644,
1131 .proc_handler = edma_prec_stats_reset_handler,
1132 },
Rakesh Nair1c6a18c2017-08-02 21:27:06 +05301133 {
1134 .procname = "iad_stats_enable",
1135 .data = &edma_iad_stats_enable,
1136 .maxlen = sizeof(int),
1137 .mode = 0644,
1138 .proc_handler = edma_iad_stats_enable_handler,
1139 },
1140 {
1141 .procname = "iad_stats_reset",
1142 .data = &edma_iad_stats_reset,
1143 .maxlen = sizeof(int),
1144 .mode = 0644,
1145 .proc_handler = edma_iad_stats_reset_handler,
1146 },
1147 {
1148 .procname = "iad_print_flow_table",
1149 .data = &edma_print_flow_table,
1150 .maxlen = sizeof(int),
1151 .mode = 0644,
1152 .proc_handler = edma_print_flow_table_handler,
1153 },
1154 {
1155 .procname = "max_valid_ifd_usec",
1156 .data = &edma_max_valid_ifd_usec,
1157 .maxlen = sizeof(int),
1158 .mode = 0644,
1159 .proc_handler = edma_max_valid_ifd_usec_handler,
1160 },
Rakesh Naird4a11502017-11-07 17:02:11 +05301161 {
1162 .procname = "edma_disable_queue_stop",
1163 .data = &edma_disable_queue_stop,
1164 .maxlen = sizeof(int),
1165 .mode = 0644,
1166 .proc_handler = edma_disable_queue_stop_func
1167 },
Rakesh Nair8016fbd2018-01-03 15:46:06 +05301168 {
1169 .procname = "core0_completion_affinity",
1170 .data = &edma_core_completion_affinity[0],
1171 .maxlen = sizeof(int),
1172 .mode = 0444,
1173 .proc_handler = proc_dointvec,
1174 },
1175 {
1176 .procname = "core1_completion_affinity",
1177 .data = &edma_core_completion_affinity[1],
1178 .maxlen = sizeof(int),
1179 .mode = 0444,
1180 .proc_handler = proc_dointvec,
1181 },
1182 {
1183 .procname = "core2_completion_affinity",
1184 .data = &edma_core_completion_affinity[2],
1185 .maxlen = sizeof(int),
1186 .mode = 0444,
1187 .proc_handler = proc_dointvec,
1188 },
1189 {
1190 .procname = "core3_completion_affinity",
1191 .data = &edma_core_completion_affinity[3],
1192 .maxlen = sizeof(int),
1193 .mode = 0444,
1194 .proc_handler = proc_dointvec,
1195 },
1196 {
1197 .procname = "num_cores",
1198 .data = &edma_num_cores,
1199 .maxlen = sizeof(int),
1200 .mode = 0444,
1201 .proc_handler = proc_dointvec,
1202 },
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301203 {}
1204};
1205
/* edma_axi_netdev_ops
 * Describe the operations supported by registered netdevices
 */
1216static const struct net_device_ops edma_axi_netdev_ops = {
1217 .ndo_open = edma_open,
1218 .ndo_stop = edma_close,
1219 .ndo_start_xmit = edma_xmit,
1220 .ndo_set_mac_address = edma_set_mac_addr,
1221 .ndo_select_queue = edma_select_xps_queue,
1222#ifdef CONFIG_RFS_ACCEL
1223 .ndo_rx_flow_steer = edma_rx_flow_steer,
1224 .ndo_register_rfs_filter = edma_register_rfs_filter,
1225 .ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
1226#endif
Bhaskar Valabojue429bab2017-03-15 09:01:23 +05301227 .ndo_get_stats64 = edma_get_stats64,
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301228 .ndo_change_mtu = edma_change_mtu,
1229};
1230
1231/* edma_axi_probe()
1232 * Initialise an adapter identified by a platform_device structure.
1233 *
1234 * The OS initialization, configuring of the adapter private structure,
1235 * and a hardware reset occur in the probe.
1236 */
1237static int edma_axi_probe(struct platform_device *pdev)
1238{
1239 struct edma_common_info *edma_cinfo;
1240 struct edma_hw *hw;
1241 struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
1242 struct resource *res;
1243 struct device_node *np = pdev->dev.of_node;
1244 struct device_node *pnp;
1245 struct device_node *mdio_node = NULL;
1246 struct platform_device *mdio_plat = NULL;
1247 struct mii_bus *miibus = NULL;
1248 struct edma_mdio_data *mdio_data = NULL;
1249 int i, j, k, err = 0;
1250 u32 portid_bmp;
1251 int idx = 0, idx_mac = 0;
1252
1253 if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
1254 dev_err(&pdev->dev, "Invalid CPU Cores\n");
1255 return -EINVAL;
1256 }
1257
1258 if ((num_rxq != 4) && (num_rxq != 8)) {
1259 dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
1260 return -EINVAL;
1261 }
1262 edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
1263 if (!edma_cinfo) {
1264 err = -ENOMEM;
1265 goto err_alloc;
1266 }
1267
1268 edma_cinfo->pdev = pdev;
1269
1270 of_property_read_u32(np, "qcom,num-gmac", &edma_cinfo->num_gmac);
1271 if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
1272 pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
1273 err = -EINVAL;
1274 goto err_cinfo;
1275 }
1276
1277 /* Initialize the netdev array before allocation
1278 * to avoid double free
1279 */
1280 for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
1281 edma_netdev[i] = NULL;
1282
1283 for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
1284 edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
1285 EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);
1286
1287 if (!edma_netdev[i]) {
1288 dev_err(&pdev->dev,
1289 "net device alloc fails for index=%d\n", i);
1290 err = -ENODEV;
1291 goto err_ioremap;
1292 }
1293
1294 SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
1295 platform_set_drvdata(pdev, edma_netdev[i]);
1296 edma_cinfo->netdev[i] = edma_netdev[i];
1297 }
1298
1299 /* Fill ring details */
1300 edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
1301 edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
1302 edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;
1303
1304 /* Update num rx queues based on module parameter */
1305 edma_cinfo->num_rx_queues = num_rxq;
1306 edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);
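	/* e.g. num_rxq = 8 gives 2 RX queues per core on a 4-core system,
	 * while the default num_rxq = 4 gives 1 per core.
	 */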
1307
1308 edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;
1309
1310 hw = &edma_cinfo->hw;
1311
1312 /* Fill HW defaults */
1313 hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
1314 hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
1315
Rakesh Nair8016fbd2018-01-03 15:46:06 +05301316 edma_num_cores = EDMA_CPU_CORES_SUPPORTED;
Rakesh Nair03824d52017-07-31 17:10:49 +05301317
1318 if (of_property_read_bool(np, "qcom,num-cores")) {
Rakesh Nairc9fc4cf2017-09-11 19:43:23 +05301319 of_property_read_u32(np, "qcom,num-cores",
Rakesh Nair8016fbd2018-01-03 15:46:06 +05301320 &edma_num_cores);
Rakesh Nair03824d52017-07-31 17:10:49 +05301321
Rakesh Nair8016fbd2018-01-03 15:46:06 +05301322 if (!edma_num_cores ||
1323 edma_num_cores > EDMA_CPU_CORES_SUPPORTED)
1324 edma_num_cores = EDMA_CPU_CORES_SUPPORTED;
Rakesh Nair03824d52017-07-31 17:10:49 +05301325 }
1326
Rakesh Nair3a756882017-11-15 12:18:21 +05301327 if (of_property_read_bool(np, "qcom,tx-ring-count"))
1328 of_property_read_u32(np, "qcom,tx-ring-count",
1329 &edma_cinfo->tx_ring_count);
1330
1331 if (of_property_read_bool(np, "qcom,rx-ring-count"))
1332 of_property_read_u32(np, "qcom,rx-ring-count",
Rakesh Nair8016fbd2018-01-03 15:46:06 +05301333 &edma_cinfo->rx_ring_count);
1334
1335 /* Set tx completion affinity map for the edma script */
1336 for (i = 0; i < EDMA_CPU_CORES_SUPPORTED; i++) {
1337 edma_core_completion_affinity[i] =
1338 edma_tx_affinity[edma_num_cores - 1][i];
1339 }
Rakesh Nair03824d52017-07-31 17:10:49 +05301340
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301341 of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
1342 of_property_read_u32(np, "qcom,rx-head-buf-size",
1343 &hw->rx_head_buff_size);
1344
1345 if (overwrite_mode) {
1346 dev_info(&pdev->dev, "page mode overwritten");
1347 edma_cinfo->page_mode = page_mode;
1348 }
1349
1350 if (jumbo_mru)
1351 edma_cinfo->fraglist_mode = 1;
1352
1353 if (edma_cinfo->page_mode)
1354 hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
1355 else if (edma_cinfo->fraglist_mode)
1356 hw->rx_head_buff_size = jumbo_mru;
1357 else if (!hw->rx_head_buff_size)
1358 hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;
1359
1360 hw->misc_intr_mask = 0;
1361 hw->wol_intr_mask = 0;
1362
1363 hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
1364 hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;
1365
1366 /* configure RSS type to the different protocol that can be
1367 * supported
1368 */
1369 hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
1370 EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
1371 EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
1372
1373 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1374
1375 edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
1376 if (IS_ERR(edma_cinfo->hw.hw_addr)) {
1377 err = PTR_ERR(edma_cinfo->hw.hw_addr);
1378 goto err_ioremap;
1379 }
1380
1381 edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;
1382
1383 /* Parse tx queue interrupt number from device tree */
1384 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1385 edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);
1386
	/* Parse rx queue interrupt numbers from the device tree.
	 * j starts where tx interrupt parsing left off (i.e. at index 16)
	 * and the loop walks the rx queues; with 4 rx queues only every
	 * other interrupt entry is used.
	 */
1392 for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
1393 i < edma_cinfo->num_rx_queues; i++) {
1394 edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
1395 k += ((num_rxq == 4) ? 2 : 1);
1396 j += ((num_rxq == 4) ? 2 : 1);
1397 }
1398
1399 edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
1400 edma_cinfo->rx_page_buffer_len = PAGE_SIZE;
1401
1402 err = edma_alloc_queues_tx(edma_cinfo);
1403 if (err) {
1404 dev_err(&pdev->dev, "Allocation of TX queue failed\n");
1405 goto err_tx_qinit;
1406 }
1407
1408 err = edma_alloc_queues_rx(edma_cinfo);
1409 if (err) {
1410 dev_err(&pdev->dev, "Allocation of RX queue failed\n");
1411 goto err_rx_qinit;
1412 }
1413
1414 err = edma_alloc_tx_rings(edma_cinfo);
1415 if (err) {
1416 dev_err(&pdev->dev, "Allocation of TX resources failed\n");
1417 goto err_tx_rinit;
1418 }
1419
1420 err = edma_alloc_rx_rings(edma_cinfo);
1421 if (err) {
1422 dev_err(&pdev->dev, "Allocation of RX resources failed\n");
1423 goto err_rx_rinit;
1424 }
1425
1426 /* Initialize netdev and netdev bitmap for transmit descriptor rings */
1427 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1428 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];
1429 int j;
1430
1431 etdr->netdev_bmp = 0;
1432 for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
1433 etdr->netdev[j] = NULL;
1434 etdr->nq[j] = NULL;
1435 }
1436 }
1437
1438 if (of_property_read_bool(np, "qcom,mdio-supported")) {
1439 mdio_node = of_find_compatible_node(NULL, NULL,
1440 "qcom,ipq40xx-mdio");
1441 if (!mdio_node) {
1442 dev_err(&pdev->dev, "cannot find mdio node by phandle");
1443 err = -EIO;
1444 goto err_mdiobus_init_fail;
1445 }
1446
1447 mdio_plat = of_find_device_by_node(mdio_node);
1448 if (!mdio_plat) {
1449 dev_err(&pdev->dev,
1450 "cannot find platform device from mdio node");
1451 of_node_put(mdio_node);
1452 err = -EIO;
1453 goto err_mdiobus_init_fail;
1454 }
1455
1456 mdio_data = dev_get_drvdata(&mdio_plat->dev);
1457 if (!mdio_data) {
1458 dev_err(&pdev->dev,
1459 "cannot get mii bus reference from device data");
1460 of_node_put(mdio_node);
1461 err = -EIO;
1462 goto err_mdiobus_init_fail;
1463 }
1464
1465 miibus = mdio_data->mii_bus;
1466 miibus_gb = mdio_data->mii_bus;
1467 }
1468
1469 for_each_available_child_of_node(np, pnp) {
1470 const char *mac_addr;
1471
		/* This check is needed if the parent and daughter dts have
		 * a different number of gmac nodes.
		 */
1475 if (idx_mac == edma_cinfo->num_gmac) {
1476 of_node_put(np);
1477 break;
1478 }
1479
1480 mac_addr = of_get_mac_address(pnp);
1481 if (mac_addr)
1482 memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);
1483
1484 idx_mac++;
1485 }
1486
1487 /* Populate the adapter structure register the netdevice */
1488 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1489 int k, m;
1490
1491 adapter[i] = netdev_priv(edma_netdev[i]);
1492 adapter[i]->netdev = edma_netdev[i];
1493 adapter[i]->pdev = pdev;
Rakesh Naired29f6b2017-04-04 15:48:08 +05301494 mutex_init(&adapter[i]->poll_mutex);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301495 for (j = 0; j < CONFIG_NR_CPUS; j++) {
1496 m = i % 2;
1497 adapter[i]->tx_start_offset[j] =
1498 ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
			/* Share the queues with the available net-devices.
			 * For instance, with 5 net-devices,
			 * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
			 * and eth1/eth3 will get the remaining queues.
			 */
1504 for (k = adapter[i]->tx_start_offset[j]; k <
1505 (adapter[i]->tx_start_offset[j] + 2); k++) {
1506 if (edma_fill_netdev(edma_cinfo, k, i, j)) {
1507 pr_err("Netdev overflow Error\n");
1508 goto err_register;
1509 }
1510 }
1511 }
1512
1513 adapter[i]->edma_cinfo = edma_cinfo;
1514 edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
1515 edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
1516 | NETIF_F_HW_VLAN_CTAG_TX
1517 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
1518 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
1519 edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
1520 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX
1521 | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
1522 NETIF_F_GRO;
1523 edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
1524 NETIF_F_TSO | NETIF_F_TSO6 |
1525 NETIF_F_GRO;
1526 edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
1527 NETIF_F_TSO | NETIF_F_TSO6 |
1528 NETIF_F_GRO;
1529
1530#ifdef CONFIG_RFS_ACCEL
1531 edma_netdev[i]->features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
1532 edma_netdev[i]->hw_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
1533 edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
1534 edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
1535#endif
1536 if (edma_cinfo->fraglist_mode) {
1537 edma_netdev[i]->features |= NETIF_F_FRAGLIST;
1538 edma_netdev[i]->hw_features |= NETIF_F_FRAGLIST;
1539 edma_netdev[i]->vlan_features |= NETIF_F_FRAGLIST;
1540 edma_netdev[i]->wanted_features |= NETIF_F_FRAGLIST;
1541 }
1542
1543 edma_set_ethtool_ops(edma_netdev[i]);
1544
		/* This just fills in a random default MAC address if a
		 * valid one was not provided in the device tree.
		 */
1547 if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
1548 random_ether_addr(edma_netdev[i]->dev_addr);
1549 pr_info("EDMA using MAC@ - using");
1550 pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
1551 *(edma_netdev[i]->dev_addr),
1552 *(edma_netdev[i]->dev_addr + 1),
1553 *(edma_netdev[i]->dev_addr + 2),
1554 *(edma_netdev[i]->dev_addr + 3),
1555 *(edma_netdev[i]->dev_addr + 4),
1556 *(edma_netdev[i]->dev_addr + 5));
1557 }
1558
1559 err = register_netdev(edma_netdev[i]);
1560 if (err)
1561 goto err_register;
1562
1563 /* carrier off reporting is important to
1564 * ethtool even BEFORE open
1565 */
1566 netif_carrier_off(edma_netdev[i]);
1567
1568 /* Allocate reverse irq cpu mapping structure for
1569 * receive queues
1570 */
1571#ifdef CONFIG_RFS_ACCEL
1572 edma_netdev[i]->rx_cpu_rmap =
1573 alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
1574 if (!edma_netdev[i]->rx_cpu_rmap) {
1575 err = -ENOMEM;
1576 goto err_rmap_alloc_fail;
1577 }
1578#endif
1579 }
1580
1581 for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
1582 edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;
1583
1584 for_each_available_child_of_node(np, pnp) {
1585 const uint32_t *vlan_tag = NULL;
1586 int len;
1587
		/* This check is needed if the parent and daughter dts have
		 * a different number of gmac nodes.
		 */
1591 if (idx == edma_cinfo->num_gmac)
1592 break;
1593
1594 /* Populate port-id to netdev lookup table */
1595 vlan_tag = of_get_property(pnp, "vlan-tag", &len);
1596 if (!vlan_tag) {
1597 pr_err("Vlan tag parsing Failed.\n");
1598 goto err_rmap_alloc_fail;
1599 }
1600
1601 adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
1602 vlan_tag++;
1603 portid_bmp = of_read_number(vlan_tag, 1);
1604 adapter[idx]->dp_bitmap = portid_bmp;
1605
1606 portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
1607 while (portid_bmp) {
1608 int port_bit = ffs(portid_bmp);
1609
1610 if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
1611 goto err_rmap_alloc_fail;
1612 edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
1613 edma_netdev[idx];
1614 portid_bmp &= ~(1 << (port_bit - 1));
1615 }
1616
1617 if (of_property_read_u32(pnp, "qcom,poll-required-dynamic",
1618 &adapter[idx]->poll_required_dynamic))
1619 adapter[idx]->poll_required_dynamic = 0;
1620
1621 if (!of_property_read_u32(pnp, "qcom,poll-required",
1622 &adapter[idx]->poll_required)) {
1623 if (adapter[idx]->poll_required) {
1624 of_property_read_u32(pnp, "qcom,phy-mdio-addr",
1625 &adapter[idx]->phy_mdio_addr);
1626 of_property_read_u32(pnp, "qcom,forced-speed",
1627 &adapter[idx]->forced_speed);
1628 of_property_read_u32(pnp, "qcom,forced-duplex",
1629 &adapter[idx]->forced_duplex);
1630
1631 /* create a phyid using MDIO bus id
1632 * and MDIO bus address
1633 */
1634 snprintf(adapter[idx]->phy_id,
1635 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
1636 miibus->id,
1637 adapter[idx]->phy_mdio_addr);
1638 }
1639 } else {
1640 adapter[idx]->poll_required = 0;
1641 adapter[idx]->forced_speed = SPEED_1000;
1642 adapter[idx]->forced_duplex = DUPLEX_FULL;
1643 }
1644
1645 idx++;
1646 }
1647
1648 edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
1649 "net/edma",
1650 edma_table);
1651 if (!edma_cinfo->edma_ctl_table_hdr) {
1652 dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
1653 goto err_unregister_sysctl_tbl;
1654 }
1655
1656 /* Disable all 16 Tx and 8 rx irqs */
1657 edma_irq_disable(edma_cinfo);
1658
1659 err = edma_reset(edma_cinfo);
1660 if (err) {
1661 err = -EIO;
1662 goto err_reset;
1663 }
1664
	/* Populate per_core_info, do a napi_add, request 16 TX irqs and
	 * 8 RX irqs, and do a napi enable.
	 */
1668 for (i = 0; i < CONFIG_NR_CPUS; i++) {
1669 u8 rx_start;
1670
1671 edma_cinfo->edma_percpu_info[i].napi.state = 0;
1672
1673 netif_napi_add(edma_netdev[0],
1674 &edma_cinfo->edma_percpu_info[i].napi,
1675 edma_poll, 64);
1676 napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
Rakesh Nair8016fbd2018-01-03 15:46:06 +05301677 edma_cinfo->edma_percpu_info[i].tx_mask =
1678 edma_tx_mask[edma_num_cores - 1][i];
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301679 edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
1680 << (i << EDMA_RX_PER_CPU_MASK_SHIFT);
Rakesh Nair8016fbd2018-01-03 15:46:06 +05301681 edma_cinfo->edma_percpu_info[i].tx_comp_start =
1682 edma_tx_comp_start[edma_num_cores - 1][i];
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301683 edma_cinfo->edma_percpu_info[i].rx_start =
1684 i << EDMA_RX_CPU_START_SHIFT;
1685 rx_start = i << EDMA_RX_CPU_START_SHIFT;
1686 edma_cinfo->edma_percpu_info[i].tx_status = 0;
1687 edma_cinfo->edma_percpu_info[i].rx_status = 0;
1688 edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
1689
1690 /* Request irq per core */
Rakesh Nair8016fbd2018-01-03 15:46:06 +05301691 for (j = edma_cinfo->edma_percpu_info[i].tx_comp_start;
1692 j < edma_tx_comp_start[edma_num_cores - 1][i] + 4; j++) {
Rakesh Naira322e422017-03-08 17:38:33 +05301693 snprintf(&edma_tx_irq[j][0], sizeof(edma_tx_irq[0]), "edma_eth_tx%d", j);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301694 err = request_irq(edma_cinfo->tx_irq[j],
1695 edma_interrupt,
1696#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1697 IRQF_DISABLED,
1698#else
1699 0,
1700#endif
1701 &edma_tx_irq[j][0],
1702 &edma_cinfo->edma_percpu_info[i]);
1703 if (err)
1704 goto err_reset;
1705 }
1706
1707 for (j = edma_cinfo->edma_percpu_info[i].rx_start;
1708 j < (rx_start +
1709 ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
1710 j++) {
Rakesh Naira322e422017-03-08 17:38:33 +05301711 snprintf(&edma_rx_irq[j][0], sizeof(edma_rx_irq[0]), "edma_eth_rx%d", j);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301712 err = request_irq(edma_cinfo->rx_irq[j],
1713 edma_interrupt,
1714#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21))
1715 IRQF_DISABLED,
1716#else
1717 0,
1718#endif
1719 &edma_rx_irq[j][0],
1720 &edma_cinfo->edma_percpu_info[i]);
1721 if (err)
1722 goto err_reset;
1723 }
1724
1725#ifdef CONFIG_RFS_ACCEL
1726 for (j = edma_cinfo->edma_percpu_info[i].rx_start;
1727 j < rx_start + 2; j += 2) {
1728 err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
1729 edma_cinfo->rx_irq[j]);
1730 if (err)
1731 goto err_rmap_add_fail;
1732 }
1733#endif
1734 }
1735
	/* Used to clear interrupt status, allocate rx buffers and
	 * configure edma descriptor registers.
	 */
1739 err = edma_configure(edma_cinfo);
1740 if (err) {
1741 err = -EIO;
1742 goto err_configure;
1743 }
1744
	/* Configure the RSS indirection table.
	 * The RSS indirection table maps 128 hash values to EDMA HW RX
	 * queues. The 128 hashes are configured in the following pattern:
	 * hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively, and so on.
	 */
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301750 for (i = 0; i < EDMA_NUM_IDT; i++)
Rakesh Nairc9fc4cf2017-09-11 19:43:23 +05301751 edma_write_reg(EDMA_REG_RSS_IDT(i),
Rakesh Nair8016fbd2018-01-03 15:46:06 +05301752 edma_idt_tbl[edma_num_cores - 1][i]);
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301753
	/* Configure the load balance mapping table.
	 * The 4 table entries are configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 * respectively.
	 */
1759 edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
1760
	/* Configure virtual queues for the Tx rings.
	 * This mapping can also be changed at runtime through
	 * a sysctl.
	 */
1765 edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
1766 edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);
1767
	/* Configure max AXI burst write size to 128 bytes */
1769 edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
1770 EDMA_AXIW_MAXWRSIZE_VALUE);
1771
1772 /* Enable All 16 tx and 8 rx irq mask */
1773 edma_irq_enable(edma_cinfo);
1774 edma_enable_tx_ctrl(&edma_cinfo->hw);
1775 edma_enable_rx_ctrl(&edma_cinfo->hw);
1776
1777 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1778 u32 port_id;
1779 if (!(adapter[i]->poll_required)) {
1780 adapter[i]->phydev = NULL;
1781 } else {
1782 adapter[i]->phydev =
1783 phy_connect(edma_netdev[i],
1784 (const char *)adapter[i]->phy_id,
1785 &edma_adjust_link,
1786 PHY_INTERFACE_MODE_SGMII);
1787 if (IS_ERR(adapter[i]->phydev)) {
1788 dev_dbg(&pdev->dev, "PHY attach FAIL");
1789 err = -EIO;
1790 goto edma_phy_attach_fail;
1791 } else {
1792 adapter[i]->phydev->advertising |=
1793 ADVERTISED_Pause |
1794 ADVERTISED_Asym_Pause;
1795 adapter[i]->phydev->supported |=
1796 SUPPORTED_Pause |
1797 SUPPORTED_Asym_Pause;
1798 portid_bmp = adapter[i]->dp_bitmap >> 1;
1799 port_id = ffs(portid_bmp);
1800 edma_phydev[port_id - 1] = adapter[i]->phydev;
1801 phy_dev_state[port_id - 1] = 1;
1802 }
1803 }
1804 }
1805
1806 spin_lock_init(&edma_cinfo->stats_lock);
1807
1808 init_timer(&edma_stats_timer);
1809 edma_stats_timer.expires = jiffies + 1*HZ;
1810 edma_stats_timer.data = (unsigned long)edma_cinfo;
1811 edma_stats_timer.function = edma_statistics_timer; /* timer handler */
1812 add_timer(&edma_stats_timer);
1813
Rakesh Nair888af952017-06-30 18:41:58 +05301814 /*
1815 * Initialize dscp2ac mapping table
1816 */
1817 for (i = 0 ; i < EDMA_PRECEDENCE_MAX ; i++)
1818 edma_dscp2ac_tbl[i] = EDMA_AC_BE;
1819
Rakesh Nair1c6a18c2017-08-02 21:27:06 +05301820 memset(edma_flow_tbl, 0, sizeof(struct edma_flow_attrib) * EDMA_MAX_IAD_FLOW_STATS_SUPPORTED);
1821
Rakesh Nair9bcf2602017-01-06 16:02:16 +05301822 return 0;
1823
1824edma_phy_attach_fail:
1825 miibus = NULL;
1826err_configure:
1827#ifdef CONFIG_RFS_ACCEL
1828 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1829 free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
1830 adapter[i]->netdev->rx_cpu_rmap = NULL;
1831 }
1832#endif
1833err_rmap_add_fail:
1834 edma_free_irqs(adapter[0]);
1835 for (i = 0; i < CONFIG_NR_CPUS; i++)
1836 napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
1837err_reset:
1838err_unregister_sysctl_tbl:
1839err_rmap_alloc_fail:
1840 for (i = 0; i < edma_cinfo->num_gmac; i++)
1841 unregister_netdev(edma_netdev[i]);
1842err_register:
1843err_mdiobus_init_fail:
1844 edma_free_rx_rings(edma_cinfo);
1845err_rx_rinit:
1846 edma_free_tx_rings(edma_cinfo);
1847err_tx_rinit:
1848 edma_free_queues(edma_cinfo);
1849err_rx_qinit:
1850err_tx_qinit:
1851 iounmap(edma_cinfo->hw.hw_addr);
1852err_ioremap:
1853 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1854 if (edma_netdev[i])
1855 free_netdev(edma_netdev[i]);
1856 }
1857err_cinfo:
1858 kfree(edma_cinfo);
1859err_alloc:
1860 return err;
1861}
1862
1863/* edma_axi_remove()
1864 * Device Removal Routine
1865 *
1866 * edma_axi_remove is called by the platform subsystem to alert the driver
1867 * that it should release a platform device.
1868 */
1869static int edma_axi_remove(struct platform_device *pdev)
1870{
1871 struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
1872 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1873 struct edma_hw *hw = &edma_cinfo->hw;
1874 int i;
1875
1876 for (i = 0; i < edma_cinfo->num_gmac; i++)
1877 unregister_netdev(edma_netdev[i]);
1878
1879 edma_stop_rx_tx(hw);
1880 for (i = 0; i < CONFIG_NR_CPUS; i++)
1881 napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
1882
1883 edma_irq_disable(edma_cinfo);
1884 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
1885 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
1886#ifdef CONFIG_RFS_ACCEL
1887 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1888 free_irq_cpu_rmap(edma_netdev[0]->rx_cpu_rmap);
1889 edma_netdev[0]->rx_cpu_rmap = NULL;
1890 }
1891#endif
1892
1893 for (i = 0; i < EDMA_MAX_PORTID_SUPPORTED; i++) {
1894 if (edma_phydev[i])
1895 phy_disconnect(edma_phydev[i]);
1896 }
1897
1898 del_timer_sync(&edma_stats_timer);
1899 edma_free_irqs(adapter);
1900 unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
1901 edma_free_tx_resources(edma_cinfo);
1902 edma_free_rx_resources(edma_cinfo);
1903 edma_free_tx_rings(edma_cinfo);
1904 edma_free_rx_rings(edma_cinfo);
1905 edma_free_queues(edma_cinfo);
1906 for (i = 0; i < edma_cinfo->num_gmac; i++)
1907 free_netdev(edma_netdev[i]);
1908
1909 kfree(edma_cinfo);
1910
1911 return 0;
1912}
1913
1914static const struct of_device_id edma_of_mtable[] = {
1915 {.compatible = "qcom,ess-edma" },
1916 {}
1917};
1918MODULE_DEVICE_TABLE(of, edma_of_mtable);
1919
1920static struct platform_driver edma_axi_driver = {
1921 .driver = {
1922 .name = edma_axi_driver_name,
1923 .of_match_table = edma_of_mtable,
1924 },
1925 .probe = edma_axi_probe,
1926 .remove = edma_axi_remove,
1927};
1928
1929module_platform_driver(edma_axi_driver);
1930
1931MODULE_DESCRIPTION("QCA ESS EDMA driver");
1932MODULE_LICENSE("Dual BSD/GPL");